def vanilla(mark, bn=False):
  """Build a vanilla VAE for 28x28x1 images (e.g. MNIST).

  Encoder Q: 784 -> 128 -> (mu, sigma) fork of size z_dim.
  Decoder P: z -> 128 -> 784 with sigmoid output.

  :param mark: model mark (identifier) passed to VAE
  :param bn: NOTE(review): accepted but never used in this body — confirm
    whether batch normalization was meant to be inserted into Q/P
  :return: the built VAE model
  """
  z_dim = 100
  model = VAE(z_dim=z_dim, mark=mark, classes=0, sample_shape=[784],
              output_shape=[28, 28, 1])
  # Define encoder
  model.Q.add(Linear(output_dim=128))
  model.Q.add(Activation.ReLU())
  # Fork produces the two Gaussian posterior heads
  fork = Fork(name='mu_sigma')
  fork.add('mu', Linear(output_dim=z_dim))
  fork.add('sigma', Linear(output_dim=z_dim))
  model.Q.add(fork)
  # Define decoder
  model.P.add(Linear(output_dim=128))
  model.P.add(Activation.ReLU())
  model.P.add(Linear(output_dim=784))
  model.P.add(Activation('sigmoid'))
  # Build model
  model.build()
  return model
def test_00(memory, learning_rate=0.001):
  """Build a small MLP predictor with a cubic Polynomial layer.

  Architecture: Input(memory) -> [Linear(2*memory) + ReLU] x 2
  -> Linear(2*memory) -> Polynomial(order=3) -> Linear(1).

  :param memory: memory depth, also the input dimension
  :param learning_rate: learning rate used by default_build
  :return: the built NeuralNet model
  """
  # Initiate model
  model = NeuralNet(memory, mark='test')
  nn = model.nn
  assert isinstance(nn, Predictor)

  width = 2 * memory
  nn.add(Input([memory]))
  # Two hidden Linear + ReLU pairs
  for _ in range(2):
    nn.add(Linear(output_dim=width))
    nn.add(Activation('relu'))
  # Final hidden layer feeds a cubic polynomial nonlinearity
  nn.add(Linear(output_dim=width))
  nn.add(Polynomial(order=3))
  nn.add(Linear(output_dim=1))

  # Build and return
  model.default_build(learning_rate)
  return model
def add_res_block():
  # Append one residual block to the enclosing net:
  # Linear(D) -> activation -> Linear(D) -> shortcut add -> activation.
  # NOTE(review): this is a closure fragment — `nn`, `D` and `activation`
  # come from the enclosing scope, which is not visible here.
  net = nn.add(ResidualNet())
  net.add(Linear(output_dim=D))
  net.add(Activation(activation))
  net.add(Linear(output_dim=D))
  net.add_shortcut()
  net.add(Activation(activation))
def vn_00(memory_depth, mark, degree=None, homo_str=0.0):
  """Build a Volterra network with per-order MLP branches.

  Orders 2..degree each get an MLP whose hidden dims come from
  `hidden_dims[order - 2]`, ending in a Linear(1) head.

  :param memory_depth: memory depth of the model
  :param mark: model mark (identifier)
  :param degree: highest Volterra order; defaults to len(hidden_dims) + 1
  :param homo_str: homogeneity regularization strength
  :raises ValueError: if degree < 1
  :return: the built NeuralNet model
  """
  D = memory_depth
  hidden_dims = [[40] * 4, [40] * 5]
  if degree is None: degree = len(hidden_dims) + 1
  elif degree < 1: raise ValueError('!! Degree must be greater than 1')

  activation = lambda: Activation('relu')
  learning_rate = 0.001
  # No weight regularization is applied (fix: removed the dead
  # `reg = 0.00` that was immediately overwritten by None)
  reg = None

  # Initiate model
  model = NeuralNet(D, mark, degree=degree)
  for order in range(2, degree + 1):
    dims = hidden_dims[order - 2]
    for dim in dims:
      model.nn.T[order].add(
        Linear(dim, weight_regularizer='l2', strength=reg))
      model.nn.T[order].add(activation())
    model.nn.T[order].add(Linear(1, weight_regularizer='l2', strength=reg))

  # Build model
  model.nn.build(loss='euclid', metric='ratio', metric_name='Err%',
                 homo_strength=homo_str,
                 optimizer=tf.train.AdamOptimizer(learning_rate))

  return model

# endregion : Volterra Networks
def mlp_00(memory_depth, mark):
  """Build a 3-hidden-layer ReLU MLP predictor with L2 regularization.

  :param memory_depth: memory depth, also the input dimension
  :param mark: model mark (identifier)
  :return: the built NeuralNet model
  """
  hidden_dims = [10, 10, 10]
  learning_rate = 0.001
  reg = 0.00

  # Initiate model and stack layers
  model = NeuralNet(memory_depth, mark)
  model.nn.add(Input([memory_depth]))
  for width in hidden_dims:
    model.nn.add(
      Linear(output_dim=width, weight_regularizer='l2', strength=reg))
    model.nn.add(Activation('relu'))
  model.nn.add(Linear(output_dim=1, weight_regularizer='l2', strength=reg))

  # Build model
  model.nn.build(loss='euclid', metric='ratio', metric_name='Err%',
                 optimizer=tf.train.AdamOptimizer(learning_rate))

  return model
def fc_lstm(th):
  """Build a fully-connected + LSTM recurrent classifier.

  Stacks th.fc_dims Linear+ReLU layers, then th.rc_dims BasicLSTMCells.
  NOTE(review): the output Linear layer is commented out below — the
  model as written ends at the last LSTM cell; confirm this is intended.

  :param th: a Config instance carrying mark, dims and learning rate
  :return: the built Classifier model
  """
  assert isinstance(th, Config)
  th.mark = 'fc_lstm_' + th.mark

  # Initiate a model
  model = Classifier(mark=th.mark, net_type=Recurrent)

  # Add input layer
  model.add(Input(sample_shape=th.input_shape))

  # Add fc layers
  for dim in th.fc_dims:
    checker.check_positive_integer(dim)
    model.add(Linear(output_dim=dim))
    # model.add(BatchNorm())
    model.add(Activation('relu'))

  # Add lstm cells
  for dim in th.rc_dims:
    model.add(BasicLSTMCell(state_size=dim))

  # Add output layer
  # model.add(Linear(output_dim=th.output_dim))

  # Build model
  optimizer = tf.train.AdamOptimizer(th.learning_rate)
  model.build(optimizer)

  return model
def bres_net_res0(th, activation='relu'): assert isinstance(th, NlsHub) # Initiate a neural net model th.mark = '{}-{}'.format(th.mark, 'res') model = NeuralNet(th.memory_depth, mark=th.mark, nn_class=BResNet) nn = model.nn assert isinstance(nn, BResNet) nn.strict_residual = False # Add layers nn.add(Input([th.memory_depth])) nn.add(Linear(output_dim=th.hidden_dim)) nn.add(Activation(activation)) branch = nn.add_branch() branch.add(Linear(output_dim=1)) def add_res_block(): net = nn.add(ResidualNet()) net.add(Linear(output_dim=th.hidden_dim)) net.add(Activation(activation)) net.add_shortcut() branch = nn.add_branch() branch.add(Linear(output_dim=1)) for _ in range(th.num_blocks - 1): add_res_block() nn.add(Linear(output_dim=1)) # Build model.default_build(th.learning_rate) return model
def mlp_00(mark, memory_depth, layer_dim, layer_num, learning_rate,
           activation='relu'):
  """Build an MLP predictor with `layer_num` hidden layers.

  :param mark: model mark (identifier)
  :param memory_depth: memory depth, also the input dimension
  :param layer_dim: width of every hidden layer
  :param layer_num: number of hidden Linear+activation pairs
  :param learning_rate: learning rate used by default_build
  :param activation: activation name used in hidden layers
  :return: the built NeuralNet model
  """
  # Initiate a predictor
  model = NeuralNet(memory_depth, mark=mark)
  nn = model.nn
  assert isinstance(nn, Predictor)

  # Stack layers: input, hidden pairs, scalar output head
  nn.add(Input([memory_depth]))
  for _ in range(layer_num):
    nn.add(Linear(output_dim=layer_dim))
    nn.add(Activation(activation))
  nn.add(Linear(output_dim=1))

  # Build and return
  model.default_build(learning_rate)
  return model
def res_00(th, activation='relu'):
  """Build an L2-regularized residual-network predictor.

  Input projection followed by th.num_blocks residual blocks, then a
  Linear(1) head. All Linear layers share th.regularizer/th.reg_strength.

  :param th: an NlsHub instance carrying depths, dims and regularization
  :param activation: activation name used throughout
  :return: the built NeuralNet model
  """
  assert isinstance(th, NlsHub)
  model = NeuralNet(th.memory_depth, mark=th.mark, nn_class=Predictor)
  nn = model.nn
  assert isinstance(nn, Predictor)

  def regularized_linear():
    # All hidden Linear layers share the same regularization settings
    return Linear(output_dim=th.hidden_dim,
                  weight_regularizer=th.regularizer,
                  strength=th.reg_strength)

  def add_res_block():
    # One residual block: Linear -> activation -> shortcut add
    net = nn.add(ResidualNet())
    net.add(regularized_linear())
    net.add(Activation(activation))
    net.add_shortcut()

  # Add blocks
  nn.add(Input([th.memory_depth]))
  nn.add(regularized_linear())
  nn.add(Activation(activation))
  for _ in range(th.num_blocks):
    add_res_block()
  nn.add(Linear(output_dim=1))

  # Build model and return
  model.default_build(th.learning_rate)
  return model
def _output_and_build(model, th):
  """Attach a softmax output layer and build the classifier.

  Selects the optimizer from th.optimizer ('adam', 'nesterov' or 'sgd'),
  then builds with sequence-accuracy metrics evaluated on the last step
  only.

  :param model: the Classifier to finish
  :param th: a Config instance carrying optimizer choice and dims
  :raises ValueError: if th.optimizer is not one of the supported names
  """
  assert isinstance(model, Classifier)
  assert isinstance(th, Config)
  if th.optimizer == 'adam':
    optimizer = tf.train.AdamOptimizer(th.learning_rate)
  elif th.optimizer == 'nesterov':
    # Nesterov momentum is fixed at 0.9 for this task
    th.momentum = 0.9
    optimizer = tf.train.MomentumOptimizer(th.learning_rate, th.momentum,
                                           use_nesterov=True)
  elif th.optimizer == 'sgd':
    optimizer = tf.train.GradientDescentOptimizer(th.learning_rate)
  else:
    # Fix: the old message claimed "must be a string" even though this
    # branch also fires for unrecognized string values
    raise ValueError(
      "!! Unknown optimizer '{}'; expected 'adam', 'nesterov' or "
      "'sgd'".format(th.optimizer))

  # Add output layer
  model.add(Linear(output_dim=th.output_dim))
  model.add(Activation('softmax'))

  model.build(optimizer=th.get_optimizer(optimizer),
              metric=['loss', 'seq_acc'], batch_metric='seq_acc',
              eval_metric='seq_acc', last_only=True)
def init_vn(mark, homo_str):
  """Build a Volterra network using module-level NN_* configuration.

  Orders above NN_MAX_VOL_ORD (and present in NN_ORDERS) each get an MLP
  branch with dims from HIDDEN_DIMS, ending in a Linear(1) head.

  NOTE(review): reads module globals NN_MEM_DEPTH, HIDDEN_DIMS, NN_DEGREE,
  NN_ORDERS, NN_MAX_VOL_ORD — none are visible in this chunk.

  :param mark: model mark (identifier)
  :param homo_str: homogeneity regularization strength
  :raises ValueError: if NN_DEGREE < 1
  :return: the built NeuralNet model
  """
  D = NN_MEM_DEPTH
  hidden_dims = HIDDEN_DIMS
  degree = NN_DEGREE
  if degree is None: degree = len(hidden_dims) + 1
  elif degree < 1: raise ValueError('!! Degree must be greater than 1')

  activation = lambda: Activation('relu')
  learning_rate = 0.001
  # No weight regularization
  reg = None

  # Initiate model
  model = NeuralNet(D, mark, degree=degree, orders=NN_ORDERS)
  # Only orders beyond the pure-Volterra maximum get an MLP branch
  for order in range(NN_MAX_VOL_ORD + 1, degree + 1):
    if order not in NN_ORDERS: continue
    # hidden_dims is indexed relative to the first MLP-backed order
    dims = hidden_dims[order - NN_MAX_VOL_ORD - 1]
    for dim in dims:
      model.nn.add(order, Linear(dim, weight_regularizer='l2', strength=reg))
      model.nn.add(order, activation())
    model.nn.add(order, Linear(1, weight_regularizer='l2', strength=reg))

  # Build model
  model.nn.build(loss='euclid', metric='ratio', metric_name='Err %',
                 homo_strength=homo_str,
                 optimizer=tf.train.AdamOptimizer(learning_rate))

  return model
def add_res_block():
  # Append one residual block plus a Linear(1) side branch to the
  # enclosing net.
  # NOTE(review): closure fragment — `nn`, `th` and `activation` come
  # from the enclosing scope, which is not visible here.
  net = nn.add(ResidualNet())
  net.add(Linear(output_dim=th.hidden_dim))
  net.add(Activation(activation))
  net.add_shortcut()
  branch = nn.add_branch()
  branch.add(Linear(output_dim=1))
def multinput(th):
  """Build a multi-input classifier: parallel FC subnets concatenated.

  Each entry of th.fc_dims defines one input branch (Input -> Flatten ->
  Linear stack); branches are merged with CONCAT before the output layer.

  NOTE(review): reads module global `core.use_bn`, not visible here.

  :param th: a Config instance carrying input shape, dims, activations
  :return: the built Classifier model
  """
  assert isinstance(th, Config)
  model = Classifier(mark=th.mark)

  # Add hidden layers
  assert isinstance(th.fc_dims, list)
  subnet = model.add(inter_type=model.CONCAT)
  for dims in th.fc_dims:
    subsubnet = subnet.add()
    # Add input layer
    subsubnet.add(Input(sample_shape=th.input_shape))
    subsubnet.add(Flatten())
    assert isinstance(dims, list)
    for dim in dims:
      subsubnet.add(Linear(output_dim=dim))
      # Optional batch normalization, controlled by module-level config
      if core.use_bn: subsubnet.add(BatchNormalization())
      subsubnet.add(Activation(th.actype1))

  # Add output layer
  model.add(Linear(output_dim=th.num_classes))

  # Build model
  optimizer = tf.train.AdamOptimizer(learning_rate=th.learning_rate)
  model.build(optimizer=optimizer)

  return model
def multinput(th):
  """Build a multi-input classifier: parallel FC subnets concatenated.

  Variant of the sibling `multinput` above: batch normalization is
  disabled (commented out) and the model is built with accuracy metrics
  instead of an explicit optimizer.

  :param th: a Config instance carrying input shape, dims, activations
  :return: the built Classifier model
  """
  assert isinstance(th, Config)
  model = Classifier(mark=th.mark)

  # Add hidden layers
  assert isinstance(th.fc_dims, list)
  subnet = model.add(inter_type=model.CONCAT)
  for dims in th.fc_dims:
    subsubnet = subnet.add()
    # Add input layer
    subsubnet.add(Input(sample_shape=th.input_shape))
    subsubnet.add(Flatten())
    assert isinstance(dims, list)
    for dim in dims:
      subsubnet.add(Linear(output_dim=dim))
      # if cf10_core.use_bn: subsubnet.add(BatchNormalization())
      subsubnet.add(Activation(th.actype1))

  # Add output layer
  model.add(Linear(output_dim=th.num_classes))

  # Build model
  model.build(metric=['accuracy', 'loss'], batch_metric='accuracy',
              eval_metric='accuracy')

  return model
def mlp_res00(mark, memory_depth, branch_num, hidden_dim, learning_rate,
              activation, identity_init=True):
  """Build a Bamboo net: branched MLP stages followed by a residual tail.

  Each of branch_num stages is Linear+activation with a Linear(1) side
  branch; the tail is one residual block, one hidden layer and a
  Linear(1) head.

  :param mark: model mark (identifier)
  :param memory_depth: memory depth, also the input dimension
  :param branch_num: number of branched hidden stages
  :param hidden_dim: width of hidden layers
  :param learning_rate: learning rate used by default_build
  :param activation: activation name used throughout
  :param identity_init: whether to use identity initialization
  :return: the built NeuralNet model
  """
  # Initiate a neural net (fix: the old if/else duplicated this call
  # with only identity_initial differing)
  model = NeuralNet(memory_depth, mark=mark, bamboo=True,
                    identity_initial=identity_init)
  nn = model.nn
  assert isinstance(nn, Bamboo)

  # Add layers
  nn.add(Input([memory_depth]))

  for _ in range(branch_num):
    nn.add(Linear(output_dim=hidden_dim))
    nn.add(Activation(activation))
    branch = nn.add_branch()
    branch.add(Linear(output_dim=1))

  # Residual tail block
  resnet = nn.add(ResidualNet())
  resnet.add(Linear(output_dim=hidden_dim))
  resnet.add(Activation(activation))
  resnet.add(Linear(output_dim=hidden_dim))
  resnet.add_shortcut()
  resnet.add(Activation(activation))

  nn.add(Linear(output_dim=hidden_dim))
  nn.add(Activation(activation))
  nn.add(Linear(output_dim=1))

  # Build model and return
  model.default_build(learning_rate)
  return model
def deep_conv(mark):
  """Build a deep CNN classifier: 5 conv layers + 3 FC layers.

  Conv trunk: 32-32-pool-64-64-pool-128 with BatchNorm+ReLU and dropout
  between same-width pairs. FC head: 256-256-y_dim.

  NOTE(review): reads module globals `config` and `regularizers`,
  not visible here.

  :param mark: model mark (identifier)
  :return: the built Classifier model
  """
  # Initiate predictor
  model = Classifier(mark=mark)
  model.add(Input(sample_shape=config.sample_shape))

  def ConvBNReLU(filters, strength=1.0, bn=True):
    # One 5x5 same-padded conv with L2 regularization, optional BN, ReLU
    model.add(
      Conv2D(filters=filters, kernel_size=5, padding='same',
             kernel_regularizer=regularizers.L2(strength=strength)))
    if bn: model.add(BatchNorm())
    model.add(Activation('relu'))

  # Conv layers
  reg = 1e-5
  ConvBNReLU(32, reg)
  model.add(Dropout(0.5))
  ConvBNReLU(32, reg)
  model.add(MaxPool2D(2, 2, 'same'))

  ConvBNReLU(64, reg)
  model.add(Dropout(0.5))
  ConvBNReLU(64, reg)
  model.add(MaxPool2D(2, 2, 'same'))

  ConvBNReLU(128, reg)

  # FC layers
  model.add(Flatten())
  model.add(Linear(256))
  # model.add(BatchNorm())
  model.add(Activation('relu'))
  model.add(Linear(256))
  # model.add(BatchNorm())
  model.add(Activation('relu'))
  model.add(Linear(config.y_dim))

  # Build model
  model.build(optimizer=tf.train.AdamOptimizer(learning_rate=1e-4))

  return model
def mlp02(mark, memory_depth, branch_num, hidden_dim, learning_rate,
          activation, identity_init=False):
  """Build a Bamboo_Broad net of parallel one-hidden-layer branches.

  The first branch is normally initialized; the remaining
  branch_num - 1 branches are zero-initialized so they start as
  identity contributions.

  :param mark: model mark (identifier)
  :param memory_depth: memory depth, also the input dimension
  :param branch_num: total number of parallel branches
  :param hidden_dim: width of each branch's hidden layer
  :param learning_rate: learning rate used by default_build
  :param activation: activation name used in every branch
  :param identity_init: whether to use identity initialization
  :return: the built NeuralNet model
  """
  # Initiate a neural net.
  # Fix: the identity_init=True path passed the misspelled keyword
  # `bamboo_braod=True`, which would fail at runtime; both paths are
  # now one call with the correctly spelled `bamboo_broad`.
  model = NeuralNet(memory_depth, mark=mark, bamboo_broad=True,
                    identity_initial=identity_init)
  nn = model.nn
  assert isinstance(nn, Bamboo_Broad)

  # Add layers
  nn.add(Input([memory_depth]))

  # First branch: standard initialization
  branch = nn.add_branch()
  branch.add(Linear(output_dim=hidden_dim))
  branch.add(Activation(activation))
  branch.add(Linear(output_dim=1))

  # Remaining branches: zero-initialized weights and biases
  for _ in range(branch_num - 1):
    branch = nn.add_branch()
    branch.add(
      Linear(output_dim=hidden_dim,
             weight_initializer=tf.zeros_initializer(),
             bias_initializer=tf.zeros_initializer()))
    branch.add(Activation(activation))
    branch.add(
      Linear(output_dim=1,
             weight_initializer=tf.zeros_initializer(),
             bias_initializer=tf.zeros_initializer()))

  # Build model and return
  model.default_build(learning_rate)
  return model
def _add_fc_relu_layers(nn, hidden_dims, activation='relu', strength=0.0):
  """Append L2-regularized Linear+activation pairs to a predictor.

  :param nn: the Predictor to extend
  :param hidden_dims: tuple/list of hidden layer widths
  :param activation: activation name added after each Linear layer
  :param strength: L2 regularization strength for every Linear layer
  """
  assert isinstance(nn, Predictor)
  assert isinstance(hidden_dims, (tuple, list))
  for width in hidden_dims:
    layer = Linear(output_dim=width, weight_regularizer='l2',
                   strength=strength)
    nn.add(layer)
    nn.add(Activation(activation))
def add_res_block():
  # Append one regularized residual block to the enclosing net:
  # Linear(th.hidden_dim) -> activation -> shortcut add.
  # NOTE(review): closure fragment — `nn`, `th` and `activation` come
  # from the enclosing scope, which is not visible here.
  net = nn.add(ResidualNet())
  net.add(
    Linear(output_dim=th.hidden_dim, weight_regularizer=th.regularizer,
           strength=th.reg_strength))
  net.add(Activation(activation))
  net.add_shortcut()
def conv_bn_relu(filters, twod=True, bn=True):
  # Append one conv layer (2-D 4x10 same-padded, or 1-D kernel-9 valid),
  # optional BatchNorm, then ReLU, to the enclosing subnet.
  # NOTE(review): closure fragment — `subsubnet` comes from the
  # enclosing scope, which is not visible here.
  if twod:
    subsubnet.add(
      Conv2D(filters=filters, kernel_size=(4, 10), padding='same'))
  else:
    subsubnet.add(
      Conv1D(filters=filters, kernel_size=9, padding='valid'))
  if bn: subsubnet.add(BatchNorm())
  subsubnet.add(Activation('relu'))
def ConvLayer(filters, bn=False):
  # Append one 5x5 same-padded L2-regularized conv, optional BatchNorm,
  # then ReLU, to the enclosing model.
  # NOTE(review): closure fragment — `model` and `strength` come from
  # the enclosing scope, which is not visible here.
  model.add(
    Conv2D(filters=filters, kernel_size=5, padding='same',
           kernel_regularizer=regularizers.L2(strength=strength)))
  if bn: model.add(BatchNorm())
  model.add(Activation.ReLU())
def finalize(th, model, add_output_layer=True):
  """Optionally attach a softmax output layer, then build the classifier.

  :param th: a Config instance providing the optimizer and num_classes
  :param model: the Classifier to finish
  :param add_output_layer: if True, append Dense(num_classes) + softmax
  :return: the built Classifier model
  """
  assert isinstance(th, Config) and isinstance(model, Classifier)

  # Add output layer if requested
  if add_output_layer:
    model.add(Dense(num_neurons=th.num_classes))
    model.add(Activation('softmax'))

  # Build model with accuracy-based metrics
  metrics = ['accuracy', 'loss']
  model.build(th.get_optimizer(), metric=metrics,
              batch_metric='accuracy', eval_metric='accuracy')
  return model
def output_and_build(model, th):
  """Attach optional dropout and a softmax output layer, then build.

  Builds with th.loss_string and F1-based metrics.

  :param model: the Classifier to finish
  :param th: a Config instance carrying output_dropout and output_dim
  """
  assert isinstance(model, Classifier) and isinstance(th, Config)

  # Add output dropout if necessary (th.output_dropout is a drop rate;
  # Dropout takes a keep probability)
  if th.output_dropout > 0:
    model.add(Dropout(1 - th.output_dropout))

  # Add output layer
  model.add(Dense(num_neurons=th.output_dim))
  model.add(Activation('softmax'))

  # Build model
  model.build(loss=th.loss_string, metric=['loss', 'f1'],
              batch_metric='f1')
def output_and_build(model, th):
  """Attach a softmax output layer and build with gen_acc metrics.

  :param model: the Classifier to finish
  :param th: a Config instance providing output_dim
  """
  assert isinstance(model, Classifier)
  assert isinstance(th, Config)

  # Output layer: Dense projection followed by softmax
  model.add(Dense(num_neurons=th.output_dim))
  model.add(Activation('softmax'))

  model.build(metric='gen_acc', batch_metric='gen_acc',
              val_targets='val_targets')
def output_and_build(model, th):
  """Attach optional dropout and a softmax output layer, then build.

  Builds with cross-entropy loss and bits-per-character metrics.

  :param model: the Classifier to finish
  :param th: a Config instance carrying output_dropout and output_dim
  """
  assert isinstance(model, Classifier)
  assert isinstance(th, Config)

  # Optional dropout before the output layer (Dropout takes a keep
  # probability, th.output_dropout is a drop rate)
  if th.output_dropout > 0:
    model.add(Dropout(1 - th.output_dropout))

  # Output layer
  model.add(Dense(num_neurons=th.output_dim))
  model.add(Activation('softmax'))

  model.build(loss='cross_entropy', metric='bpc', batch_metric='bpc')
def ConvBNReLU(filters, strength=1.0, bn=True):
  # Append one 5x5 same-padded L2-regularized conv, optional BatchNorm,
  # then ReLU, to the enclosing model.
  # NOTE(review): closure fragment — `model` and `regularizers` come
  # from the enclosing scope, which is not visible here.
  model.add(
    Conv2D(filters=filters, kernel_size=5, padding='same',
           kernel_regularizer=regularizers.L2(strength=strength)))
  if bn: model.add(BatchNorm())
  model.add(Activation('relu'))
def mlp00(mark):
  """Build a TD player MLP for a 15x15 board.

  Flattened board -> two Linear(225)+ReLU layers -> Linear(1) -> sigmoid.

  :param mark: model mark (identifier)
  :return: the built TDPlayer model
  """
  # Define model
  model = TDPlayer(mark=mark)
  model.add(Input(sample_shape=[15, 15]))
  model.add(Flatten())
  # Two hidden layers of width 225 (= 15 * 15)
  for _ in range(2):
    model.add(Linear(225))
    model.add(Activation.ReLU())
  # Scalar value head in (0, 1)
  model.add(Linear(1))
  model.add(Activation('sigmoid'))

  # Build model
  model.build()
  return model
def finalize(th, model): assert isinstance(th, Config) and isinstance(model, Classifier) # Add output layer model.add(Dense(num_neurons=th.num_classes, prune_frac=0.05)) # model.add(Dense(num_neurons=th.num_classes)) model.add(Activation('softmax')) # Build model model.build(metric=['accuracy', 'loss'], batch_metric='accuracy', eval_metric='accuracy') return model
def output_and_build(model, th):
  """Attach a softmax output layer and build for last-step accuracy.

  :param model: the Classifier to finish
  :param th: a Config instance providing output_dim and bias_out_units
  """
  assert isinstance(model, Classifier)
  assert isinstance(th, Config)

  # Output projection; bias usage is controlled by configuration
  output_layer = Linear(output_dim=th.output_dim,
                        use_bias=th.bias_out_units)
  model.add(output_layer)
  model.add(Activation('softmax'))

  model.build(metric='accuracy', batch_metric='accuracy', last_only=True)
def output_and_build(model, th):
  """Attach optional dropout and a softmax output layer, then build.

  Builds with accuracy metrics evaluated on the last step only.

  :param model: the Classifier to finish
  :param th: a Config instance carrying output_dropout and output_dim
  """
  assert isinstance(model, Classifier)
  assert isinstance(th, Config)

  # Optional dropout before the output layer (Dropout takes a keep
  # probability, th.output_dropout is a drop rate)
  if th.output_dropout > 0:
    model.add(Dropout(1 - th.output_dropout))

  # Output layer
  model.add(Dense(num_neurons=th.output_dim))
  model.add(Activation('softmax'))

  model.build(last_only=True, metric=['accuracy', 'loss'],
              batch_metric='accuracy', eval_metric='accuracy')