import nnvm
import nnvm.symbol as sym
from nnvm.compiler import graph_util

# NOTE: Model, SGD and BATCH_SIZE are assumed to be defined elsewhere in the
# surrounding example code; they are not part of the NNVM API.


def simple_bn(x, gamma, beta, moving_mean, moving_var,
              axis=1, epsilon=1e-5, shape=None):
    # expect = (x - moving_mean) / sym.sqrt(moving_var + epsilon) * gamma + beta
    # `shape` is the full shape of x and must be provided.
    scale = sym.elemwise_mul(1 / sym.sqrt(moving_var + epsilon), gamma)
    shift = sym.elemwise_add(
        sym.elemwise_mul(sym.negative(moving_mean), scale), beta)
    # For inputs with more than 2 dimensions, broadcast scale/shift over the
    # trailing axes so they line up with `axis` of x (e.g. the channel axis).
    num_newaxis = len(shape) - axis - 1
    if num_newaxis:
        scale = sym.expand_dims(scale, axis=1, num_newaxis=num_newaxis)
        shift = sym.expand_dims(shift, axis=1, num_newaxis=num_newaxis)
    return x * scale + shift
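
# A minimal usage sketch (not part of the original test): simple_bn builds a
# symbolic expression that should match sym.batch_norm in inference mode.
# The shape and variable names below are illustrative assumptions.
def _simple_bn_example():
    shape = (10, 20)
    x = sym.Variable("x", shape=shape)
    beta = sym.Variable("beta")
    gamma = sym.Variable("gamma")
    moving_var = sym.Variable("moving_var")
    moving_mean = sym.Variable("moving_mean")
    y = simple_bn(x, gamma, beta, moving_mean, moving_var,
                  epsilon=1e-5, shape=shape)
    return nnvm.graph.create(y)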
def nn(m: Model):
    v_images = sym.Variable("images", shape=(BATCH_SIZE, 1, 28, 28), dtype=0)
    v_true_labels = sym.Variable("true_labels", shape=(BATCH_SIZE, 10), dtype=0)

    x = v_images
    x = sym.reshape(data=x, shape=(BATCH_SIZE, 28 * 28))
    x = sym.dense(data=x, units=10)
    logits = x
    x = -sym.elemwise_mul(v_true_labels, sym.log_softmax(x))
    loss = sym.sum(x) / BATCH_SIZE

    # This is not really accuracy, because we use softmax instead of hardmax
    accuracy = sym.sum(v_true_labels * sym.softmax(logits)) / BATCH_SIZE

    # We have to somehow list all weights (the corresponding variables are
    # generated automatically)
    weight_vars = [
        v for v in loss.list_input_variables()
        if v.attr('name') not in ['images', 'true_labels']
    ]

    optimizer = SGD(learning_rate=1e-4)
    update_step = optimizer.minimize(loss, var=weight_vars)

    tgraph = nnvm.graph.create(sym.Group(
        [loss, update_step])).apply("InferShape").apply("InferType")
    fgraph = nnvm.graph.create(sym.Group(
        [loss, accuracy])).apply("InferShape").apply("InferType")

    m.tgraph = tgraph
    m.fgraph = fgraph
    m.optimizer = optimizer
    m.loss = loss
    return m
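
# Sketch (an assumption, not part of the original example) of how the forward
# graph produced by nn() could be compiled and executed with the standard
# NNVM/TVM 0.x APIs. Real code would also initialize the dense weights and
# feed actual MNIST batches instead of zeros.
def _run_forward_graph(m: Model):
    import numpy as np
    import tvm
    import nnvm.compiler
    from tvm.contrib import graph_runtime

    deploy_graph, lib, params = nnvm.compiler.build(
        m.fgraph, target="llvm",
        shape={"images": (BATCH_SIZE, 1, 28, 28),
               "true_labels": (BATCH_SIZE, 10)})
    module = graph_runtime.create(deploy_graph, lib, tvm.cpu(0))
    module.set_input("images",
                     np.zeros((BATCH_SIZE, 1, 28, 28), dtype="float32"))
    module.set_input("true_labels",
                     np.zeros((BATCH_SIZE, 10), dtype="float32"))
    module.run()
    # The outputs follow the grouping in nn(): [loss, accuracy]
    return module.get_output(0).asnumpy(), module.get_output(1).asnumpy()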
def test_cnn_gradients():
    # input data
    h = 128
    w = 128
    data_shape = (1000, 3, h, w)
    data = sym.Variable('data', shape=data_shape, dtype=0)

    # conv2d
    num_channels = 64
    kernel_size = 32
    conv_w_shape = (num_channels, 3, kernel_size, kernel_size)
    conv_b_shape = (num_channels,)
    conv_w = sym.Variable('conv_w', shape=conv_w_shape)
    conv_b = sym.Variable('conv_b', shape=conv_b_shape)
    conv1 = sym.conv2d(data=data, weight=conv_w, bias=conv_b,
                       channels=num_channels,
                       kernel_size=(kernel_size, kernel_size),
                       name='conv1')
    # relu1
    relu1 = sym.relu(data=conv1, name='relu1')
    # max pooling
    max_pooling1 = sym.max_pool2d(data=relu1, pool_size=(2, 2),
                                  name='max_pooling1')
    # flatten
    flatten1 = sym.flatten(data=max_pooling1)
    # shape after flatten
    flatten_out_shape = (h - kernel_size) * (w - kernel_size) * num_channels
    # dense1
    dense1_hidden_units = 100
    dense1 = sym.dense(data=flatten1, name='dense1',
                       units=dense1_hidden_units)
    # relu2
    relu2 = sym.relu(data=dense1, name='relu2')
    # dense2
    dense2_hidden_units = 10
    dense2 = sym.dense(data=relu2, name='dense2',
                       units=dense2_hidden_units)
    # softmax
    mlp = sym.softmax(data=dense2, name='softmax')
    # fake non-sparse label
    label = sym.full_like(mlp, fill_value=1)
    # cross entropy loss
    ce_loss = sym.sum(
        sym.elemwise_mul(sym.log_softmax(dense2), label),
        axis=1, keepdims=True, name="ce_loss")

    # input variables:
    # print(grad_g.symbol.list_input_names())
    # >> ['data', 'conv_w', 'conv_b',
    #     'dense1_weight', 'dense1_bias',
    #     'dense2_weight', 'dense2_bias']
    # output gradient variables:
    # print(grad_g.symbol.list_output_names())
    # >> ['conv1_grad_data', 'conv1_grad_weight', 'conv1_grad_bias',
    #     'dense1_grad_weight', 'dense1_grad_bias',
    #     'dense2_grad_weight', 'dense2_grad_bias']
    grad_g = graph_util.get_gradient_graph(ce_loss,
                                           ce_loss.list_input_variables())

    # infer shape
    in_shapes, out_shapes = graph_util.infer_shape(grad_g)

    # forward graph shapes
    assert in_shapes == [list(data_shape), list(conv_w_shape),
                         list(conv_b_shape),
                         [dense1_hidden_units, flatten_out_shape],
                         [dense1_hidden_units],
                         [dense2_hidden_units, dense1_hidden_units],
                         [dense2_hidden_units]]
    # input grad shapes should equal the input shapes
    assert in_shapes == out_shapes

    # output grads w.r.t. input variables
    grads = graph_util.gradients(ce_loss, ce_loss.list_input_variables())

    # the number of gradients should equal the number of input variables
    assert len(grads) == len(ce_loss.list_input_variables())

    # infer type
    in_dtypes, out_dtypes = graph_util.infer_dtype(grad_g)
    assert out_dtypes == ['float32', 'float32', 'float32', 'float32',
                          'float32', 'float32', 'float32']
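
# Worked check of the shape arithmetic behind flatten_out_shape above
# (assumes NNVM's defaults as implied by the asserted shapes: conv2d with
# padding (0, 0) and stride (1, 1), and max_pool2d with stride (1, 1)).
def _check_flatten_shape(h=128, w=128, kernel_size=32, num_channels=64):
    conv_h, conv_w = h - kernel_size + 1, w - kernel_size + 1  # 97 x 97
    pool_h, pool_w = conv_h - 2 + 1, conv_w - 2 + 1            # 96 x 96
    assert (pool_h, pool_w) == (h - kernel_size, w - kernel_size)
    return num_channels * pool_h * pool_w                      # 589824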