def get_classifier(input_data, num_classes):
    """Build the VGG fully-connected classifier head.

    Flattens the input, applies two identical 4096-unit
    dense -> relu -> dropout(0.5) stages (fc6, fc7), then projects to
    ``num_classes`` logits with a final dense layer (fc8).

    Parameters
    ----------
    input_data : symbol
        Feature map to classify.
    num_classes : int
        Number of output classes.

    Returns
    -------
    symbol
        The fc8 output symbol.
    """
    net = sym.flatten(data=input_data, name="flatten")
    # fc6 and fc7 are structurally identical, so build them in a loop;
    # layer names are kept exactly as in the reference VGG definition.
    for stage in (6, 7):
        net = sym.dense(data=net, units=4096, name="fc%d" % stage)
        net = sym.relu(data=net, name="relu%d" % stage)
        net = sym.dropout(data=net, rate=0.5, name="drop%d" % stage)
    return sym.dense(data=net, units=num_classes, name="fc8")
def lstm_gate(op, U, b, x, num_units):
    """Apply one LSTM gate: a dense projection followed by a nonlinearity.

    Parameters
    ----------
    op : callable
        Nonlinearity operation applied to the dense output.
    U : symbol
        Weight matrix of shape (a, b).
    b : symbol
        Bias of shape (1, b).
    x : symbol
        Input tensor of shape (1, a).
    num_units : int
        Number of output units of the dense projection.

    Returns
    -------
    symbol
        Tensor of shape (1, b).
    """
    projected = sym.dense(x, U, b, units=num_units)
    return op(projected)
def lstm_and_dense_layer(num_timesteps: int, num_inputs: int,
                         num_classes: int, num_hidden: int,
                         dense_init=np.random.normal):
    """Build an LSTM layer followed by a dense classification layer.

    An LSTM with ``num_hidden`` units is unrolled over ``num_timesteps``
    steps; its final output is projected to ``num_classes`` classes by a
    dense layer whose parameters are initialized with ``dense_init``.

    Returns
    -------
    tuple
        ``(X, cls)`` — the input placeholder symbol and the dense
        classification output symbol.
    """
    dense_weight = Variable("W", init=dense_init(size=[num_hidden, num_classes]))
    dense_bias = Variable("b", init=dense_init(size=[1, num_classes]))
    X, outputs = lstm_layer(num_timesteps, num_inputs, num_units=num_hidden)
    # Only the last timestep's output feeds the classifier.
    return X, sym.dense(outputs[-1], dense_weight, dense_bias, units=num_classes)
def _get_model(dshape):
    """Build a small test graph: dense layer split in half along axis 1,
    returning a group of (left_half + 1, right_half - 1)."""
    inp = sym.Variable('data', shape=dshape)
    dense_out = sym.dense(inp, units=dshape[-1] * 2, use_bias=True)
    lhs, rhs = sym.split(dense_out, indices_or_sections=2, axis=1)
    return sym.Group((lhs + 1, rhs - 1))
def _get_model(dshape):
    """Construct a test network whose dense output is split into two
    halves, yielding a group of (first_half + 1, second_half - 1)."""
    data_var = sym.Variable('data', shape=dshape)
    projected = sym.dense(data_var, units=2 * dshape[-1], use_bias=True)
    first, second = sym.split(projected, indices_or_sections=2, axis=1)
    return sym.Group((first + 1, second - 1))