# NOTE: the builders below are collected from several example modules; each
# assumes its module's imports (e.g. `import tensorflow as tf`,
# `import numpy as np`, plus the tframe model and layer classes such as
# NeuralNet, Predictor, Classifier, Linear, Activation).


def add_res_block():
  # NOTE: this helper closes over `nn`, `D` and `activation` from an
  # enclosing scope; it is meant to be defined inside a model builder.
  net = nn.add(ResidualNet())
  net.add(Linear(output_dim=D))
  net.add(Activation(activation))
  net.add(Linear(output_dim=D))
  net.add_shortcut()
  net.add(Activation(activation))

def mlp_00(mark, memory_depth, layer_dim, layer_num, learning_rate,
           activation='relu'):
  # Initiate a predictor
  model = NeuralNet(memory_depth, mark=mark)
  nn = model.nn
  assert isinstance(nn, Predictor)

  # Add layers
  nn.add(Input([memory_depth]))
  for _ in range(layer_num):
    nn.add(Linear(output_dim=layer_dim))
    nn.add(Activation(activation))
  nn.add(Linear(output_dim=1))

  # Build model
  model.default_build(learning_rate)

  # Return model
  return model

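# Hypothetical usage sketch (argument values are illustrative only, not from
# the original source):
# model = mlp_00('mlp00', memory_depth=40, layer_dim=80, layer_num=4,
#                learning_rate=0.001)
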
def vn_00(memory_depth, mark, degree=None, homo_str=0.0):
  D = memory_depth
  hidden_dims = [[40] * 4, [40] * 5]
  if degree is None: degree = len(hidden_dims) + 1
  elif degree < 1: raise ValueError('!! Degree must be at least 1')
  activation = lambda: Activation('relu')
  learning_rate = 0.001
  reg = None  # l2 regularization strength (disabled)

  # Initiate model
  model = NeuralNet(D, mark, degree=degree)
  for order in range(2, degree + 1):
    dims = hidden_dims[order - 2]
    for dim in dims:
      model.nn.T[order].add(
        Linear(dim, weight_regularizer='l2', strength=reg))
      model.nn.T[order].add(activation())
    model.nn.T[order].add(Linear(1, weight_regularizer='l2', strength=reg))

  # Build model
  model.nn.build(loss='euclid', metric='ratio', metric_name='Err%',
                 homo_strength=homo_str,
                 optimizer=tf.train.AdamOptimizer(learning_rate))

  return model

# endregion : Volterra Networks

def bres_net_res0(th, activation='relu'):
  assert isinstance(th, NlsHub)
  # Initiate a neural net model
  th.mark = '{}-{}'.format(th.mark, 'res')
  model = NeuralNet(th.memory_depth, mark=th.mark, nn_class=BResNet)
  nn = model.nn
  assert isinstance(nn, BResNet)
  nn.strict_residual = False

  # Add layers
  nn.add(Input([th.memory_depth]))
  nn.add(Linear(output_dim=th.hidden_dim))
  nn.add(Activation(activation))
  branch = nn.add_branch()
  branch.add(Linear(output_dim=1))

  def add_res_block():
    net = nn.add(ResidualNet())
    net.add(Linear(output_dim=th.hidden_dim))
    net.add(Activation(activation))
    net.add_shortcut()
    branch = nn.add_branch()
    branch.add(Linear(output_dim=1))

  for _ in range(th.num_blocks - 1):
    add_res_block()
  nn.add(Linear(output_dim=1))

  # Build
  model.default_build(th.learning_rate)
  return model

def add_res_block_poly():
  # NOTE: like add_res_block above, this helper closes over `nn`, `D`,
  # `order1` and `order2` from an enclosing model-builder scope.
  net = nn.add(ResidualNet())
  net.add(Linear(output_dim=D))
  net.add(Polynomial(order=order1))
  net.add(Linear(output_dim=D))
  net.add_shortcut()
  net.add(Polynomial(order=order2))

def res_00(th, activation='relu'):
  assert isinstance(th, NlsHub)
  model = NeuralNet(th.memory_depth, mark=th.mark, nn_class=Predictor)
  nn = model.nn
  assert isinstance(nn, Predictor)

  # Add blocks
  nn.add(Input([th.memory_depth]))
  nn.add(Linear(output_dim=th.hidden_dim, weight_regularizer=th.regularizer,
                strength=th.reg_strength))
  nn.add(Activation(activation))

  def add_res_block():
    net = nn.add(ResidualNet())
    net.add(Linear(output_dim=th.hidden_dim,
                   weight_regularizer=th.regularizer,
                   strength=th.reg_strength))
    net.add(Activation(activation))
    net.add_shortcut()

  for _ in range(th.num_blocks):
    add_res_block()
  nn.add(Linear(output_dim=1))

  # Build model
  model.default_build(th.learning_rate)

  # Return model
  return model

def multinput(th):
  assert isinstance(th, Config)
  model = Classifier(mark=th.mark)

  # Add hidden layers
  assert isinstance(th.fc_dims, list)
  subnet = model.add(inter_type=model.CONCAT)
  for dims in th.fc_dims:
    subsubnet = subnet.add()
    # Add input layer
    subsubnet.add(Input(sample_shape=th.input_shape))
    subsubnet.add(Flatten())
    assert isinstance(dims, list)
    for dim in dims:
      subsubnet.add(Linear(output_dim=dim))
      if core.use_bn: subsubnet.add(BatchNormalization())
      subsubnet.add(Activation(th.actype1))

  # Add output layer
  model.add(Linear(output_dim=th.num_classes))

  # Build model
  optimizer = tf.train.AdamOptimizer(learning_rate=th.learning_rate)
  model.build(optimizer=optimizer)
  return model

def test_00(memory, learning_rate=0.001):
  # Configurations
  mark = 'test'
  D = memory

  # Initiate model
  model = NeuralNet(memory, mark=mark)
  nn = model.nn
  assert isinstance(nn, Predictor)

  # Add layers
  nn.add(Input([memory]))
  nn.add(Linear(output_dim=2 * D))
  nn.add(Activation('relu'))
  nn.add(Linear(output_dim=2 * D))
  nn.add(Activation('relu'))
  nn.add(Linear(output_dim=2 * D))
  nn.add(Polynomial(order=3))
  nn.add(Linear(output_dim=1))

  # Build model
  model.default_build(learning_rate)

  # Return model
  return model

def multinput(th):
  assert isinstance(th, Config)
  model = Classifier(mark=th.mark)

  # Add hidden layers
  assert isinstance(th.fc_dims, list)
  subnet = model.add(inter_type=model.CONCAT)
  for dims in th.fc_dims:
    subsubnet = subnet.add()
    # Add input layer
    subsubnet.add(Input(sample_shape=th.input_shape))
    subsubnet.add(Flatten())
    assert isinstance(dims, list)
    for dim in dims:
      subsubnet.add(Linear(output_dim=dim))
      # if cf10_core.use_bn: subsubnet.add(BatchNormalization())
      subsubnet.add(Activation(th.actype1))

  # Add output layer
  model.add(Linear(output_dim=th.num_classes))

  # Build model
  model.build(metric=['accuracy', 'loss'], batch_metric='accuracy',
              eval_metric='accuracy')
  return model

def init_vn(mark, homo_str):
  D = NN_MEM_DEPTH
  hidden_dims = HIDDEN_DIMS
  degree = NN_DEGREE
  if degree is None: degree = len(hidden_dims) + 1
  elif degree < 1: raise ValueError('!! Degree must be at least 1')
  activation = lambda: Activation('relu')
  learning_rate = 0.001
  reg = None

  # Initiate model
  model = NeuralNet(D, mark, degree=degree, orders=NN_ORDERS)
  for order in range(NN_MAX_VOL_ORD + 1, degree + 1):
    if order not in NN_ORDERS: continue
    dims = hidden_dims[order - NN_MAX_VOL_ORD - 1]
    for dim in dims:
      model.nn.add(order, Linear(dim, weight_regularizer='l2', strength=reg))
      model.nn.add(order, activation())
    model.nn.add(order, Linear(1, weight_regularizer='l2', strength=reg))

  # Build model
  model.nn.build(loss='euclid', metric='ratio', metric_name='Err %',
                 homo_strength=homo_str,
                 optimizer=tf.train.AdamOptimizer(learning_rate))

  return model

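# Worked example for init_vn (the NN_* values here are illustrative
# assumptions, not from the source): with NN_MAX_VOL_ORD = 1,
# HIDDEN_DIMS = [[40] * 4, [40] * 5], NN_DEGREE = None and NN_ORDERS = (2, 3),
# degree resolves to len(HIDDEN_DIMS) + 1 = 3, so the loop visits orders 2
# and 3: order 2 gets a tower of four 40-unit hidden layers, order 3 a tower
# of five, and each tower is capped by a Linear(1) output.
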
def vanilla(mark, bn=False):
  # NOTE: the `bn` flag is currently unused
  z_dim = 100
  model = VAE(z_dim=z_dim, mark=mark, classes=0, sample_shape=[784],
              output_shape=[28, 28, 1])

  # Define encoder
  model.Q.add(Linear(output_dim=128))
  model.Q.add(Activation.ReLU())
  fork = Fork(name='mu_sigma')
  fork.add('mu', Linear(output_dim=z_dim))
  fork.add('sigma', Linear(output_dim=z_dim))
  model.Q.add(fork)

  # Define decoder
  model.P.add(Linear(output_dim=128))
  model.P.add(Activation.ReLU())
  model.P.add(Linear(output_dim=784))
  model.P.add(Activation('sigmoid'))

  # Build model
  model.build()
  return model

def mlp_00(memory_depth, mark):
  D = memory_depth
  hidden_dims = [10, 10, 10]
  activation = lambda: Activation('relu')
  learning_rate = 0.001
  reg = 0.00

  # Initiate model
  model = NeuralNet(memory_depth, mark)
  model.nn.add(Input([D]))
  for dim in hidden_dims:
    model.nn.add(Linear(output_dim=dim, weight_regularizer='l2', strength=reg))
    model.nn.add(activation())
  model.nn.add(Linear(output_dim=1, weight_regularizer='l2', strength=reg))

  # Build model
  model.nn.build(loss='euclid', metric='ratio', metric_name='Err%',
                 optimizer=tf.train.AdamOptimizer(learning_rate))

  return model

def svn_01(memory_depth, mark, hidden_dim, order1, learning_rate=0.001):
  strength = 0

  # Initiate a predictor
  model = NeuralNet(memory_depth, mark=mark)
  nn = model.nn
  assert isinstance(nn, Predictor)

  # Add layers
  nn.add(Input([memory_depth]))
  nn.add(Linear(output_dim=hidden_dim))
  nn.add(Polynomial(order=order1))
  nn.add(Linear(output_dim=1, weight_regularizer='l2', strength=strength,
                use_bias=False))

  # Build model
  nn.build(loss='euclid', metric='rms_ratio', metric_name='RMS(err)%',
           optimizer=tf.train.AdamOptimizer(learning_rate))

  # Return model
  return model

def conv_2d_test(th):
  assert isinstance(th, Config)
  # Initiate model
  th.mark = 'cnn_2d' + th.mark

  def data_dim(sample_rate=44100, duration=2, n_mfcc=40):
    audio_length = sample_rate * duration
    dim = (n_mfcc, 1 + int(np.floor(audio_length / 512)), 1)
    return dim

  dim = data_dim()
  model = Classifier(mark=th.mark)
  # Add input layer
  model.add(Input(sample_shape=[dim[0], dim[1], 1]))
  # Add hidden layers
  model.add(Conv2D(32, (4, 10), padding='same'))
  model.add(BatchNorm())
  model.add(Activation('relu'))
  model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
  # model.add(Dropout(0.7))

  model.add(Conv2D(32, (4, 10), padding='same'))
  model.add(BatchNorm())
  model.add(Activation('relu'))
  model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
  # model.add(Dropout(0.7))

  model.add(Conv2D(32, (4, 10), padding='same'))
  model.add(BatchNorm())
  model.add(Activation('relu'))
  model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
  # model.add(Dropout(0.7))

  model.add(Conv2D(32, (4, 10), padding='same'))
  model.add(BatchNorm())
  model.add(Activation('relu'))
  model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
  # model.add(Dropout(0.7))

  model.add(Flatten())
  model.add(Linear(output_dim=64))
  model.add(BatchNorm())
  model.add(Activation('relu'))

  # Add output layer
  model.add(Linear(output_dim=41))
  model.add(Activation('softmax'))

  # Build model
  optimizer = tf.train.AdamOptimizer(learning_rate=th.learning_rate)
  model.build(optimizer=optimizer)
  return model

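# Worked example: with the default data_dim() arguments (sample_rate=44100,
# duration=2, n_mfcc=40), audio_length = 88200 and floor(88200 / 512) = 172,
# so the input spectrogram shape is (40, 173, 1).
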
def conv_test(th):
  assert isinstance(th, Config)
  # Initiate model
  th.mark = 'cnn' + th.mark
  model = Classifier(mark=th.mark)
  # Add input layer
  model.add(Input(sample_shape=[32000, 1]))
  # Add hidden layers
  model.add(Conv1D(filters=16, kernel_size=9, padding='valid'))
  model.add(Activation('relu'))
  model.add(Conv1D(filters=16, kernel_size=9, padding='valid'))
  model.add(Activation('relu'))
  model.add(MaxPool1D(pool_size=16, strides=16))
  # model.add(Dropout(0.9))

  model.add(Conv1D(filters=32, kernel_size=3, padding='valid'))
  model.add(Activation('relu'))
  model.add(Conv1D(filters=32, kernel_size=3, padding='valid'))
  model.add(Activation('relu'))
  model.add(MaxPool1D(pool_size=4, strides=4))
  # model.add(Dropout(0.9))

  model.add(Conv1D(filters=32, kernel_size=3, padding='valid'))
  model.add(Activation('relu'))
  model.add(Conv1D(filters=32, kernel_size=3, padding='valid'))
  model.add(Activation('relu'))
  model.add(MaxPool1D(pool_size=4, strides=4))
  # model.add(Dropout(0.9))

  model.add(Conv1D(filters=256, kernel_size=3, padding='valid'))
  model.add(Activation('relu'))
  model.add(Conv1D(filters=256, kernel_size=3, padding='valid'))
  model.add(Activation('relu'))
  model.add(GlobalMaxPooling1D())
  # model.add(Dropout(0.8))

  model.add(Linear(output_dim=64))
  model.add(Activation('relu'))
  model.add(Linear(output_dim=1028))
  model.add(Activation('relu'))

  # Add output layer
  model.add(Linear(output_dim=41))
  model.add(Activation('softmax'))

  # Build model
  optimizer = tf.train.AdamOptimizer(learning_rate=th.learning_rate)
  model.build(optimizer=optimizer)
  return model

def fc_lstm(th):
  assert isinstance(th, Config)
  th.mark = 'fc_lstm_' + th.mark
  # Initiate a model
  model = Classifier(mark=th.mark, net_type=Recurrent)
  # Add input layer
  model.add(Input(sample_shape=th.input_shape))
  # Add fc layers
  for dim in th.fc_dims:
    checker.check_positive_integer(dim)
    model.add(Linear(output_dim=dim))
    # model.add(BatchNorm())
    model.add(Activation('relu'))
  # Add lstm cells
  for dim in th.rc_dims:
    model.add(BasicLSTMCell(state_size=dim))
  # Add output layer
  # model.add(Linear(output_dim=th.output_dim))
  # Build model
  optimizer = tf.train.AdamOptimizer(th.learning_rate)
  model.build(optimizer)
  return model

def mlp_00(learning_rate=0.001, memory_depth=80):
  """Performance on WH: [0] depth = 80"""
  # Configuration
  hidden_dims = [2 * memory_depth] * 4
  strength = 0
  activation = 'lrelu'
  mark = 'mlp_D{}_{}_{}'.format(memory_depth, hidden_dims, activation)

  # Initiate a predictor
  model = NeuralNet(memory_depth, mark=mark)
  nn = model.nn
  assert isinstance(nn, Predictor)

  # Add layers
  nn.add(Input([memory_depth]))
  lc._add_fc_relu_layers(nn, hidden_dims, activation, strength=strength)
  nn.add(Linear(output_dim=1, weight_regularizer='l2', strength=strength))

  # Build model
  # optimizer = tf.train.GradientDescentOptimizer(learning_rate)
  optimizer = tf.train.AdamOptimizer(learning_rate)
  nn.build(loss='euclid', metric='rms_ratio', metric_name='RMS(err)%',
           optimizer=optimizer)

  # Return model
  return model

def _output_and_build(model, th):
  assert isinstance(model, Classifier)
  assert isinstance(th, Config)
  if th.optimizer == 'adam':
    optimizer = tf.train.AdamOptimizer(th.learning_rate)
  elif th.optimizer == 'nesterov':
    th.momentum = 0.9
    optimizer = tf.train.MomentumOptimizer(th.learning_rate, th.momentum,
                                           use_nesterov=True)
  elif th.optimizer == 'sgd':
    optimizer = tf.train.GradientDescentOptimizer(th.learning_rate)
  else:
    raise ValueError(
      "!! In this task, th.optimizer must be one of 'adam', 'nesterov', 'sgd'")

  # Add output layer
  model.add(Linear(output_dim=th.output_dim))
  model.add(Activation('softmax'))

  model.build(optimizer=th.get_optimizer(optimizer),
              metric=['loss', 'seq_acc'], batch_metric='seq_acc',
              eval_metric='seq_acc', last_only=True)

def mlp02(mark, memory_depth, branch_num, hidden_dim, learning_rate,
          activation, identity_init=False):
  # Initiate a neural net
  model = NeuralNet(memory_depth, mark=mark, bamboo_broad=True,
                    identity_initial=identity_init)
  nn = model.nn
  assert isinstance(nn, Bamboo_Broad)

  # Add layers
  nn.add(Input([memory_depth]))
  branch = nn.add_branch()
  branch.add(Linear(output_dim=hidden_dim))
  branch.add(Activation(activation))
  branch.add(Linear(output_dim=1))

  # Remaining branches start from zero so they initially contribute nothing
  for _ in range(branch_num - 1):
    branch = nn.add_branch()
    branch.add(Linear(output_dim=hidden_dim,
                      weight_initializer=tf.zeros_initializer(),
                      bias_initializer=tf.zeros_initializer()))
    branch.add(Activation(activation))
    branch.add(Linear(output_dim=1,
                      weight_initializer=tf.zeros_initializer(),
                      bias_initializer=tf.zeros_initializer()))

  # Build model
  model.default_build(learning_rate)

  # Return model
  return model

def deep_conv(mark):
  # Initiate predictor
  model = Classifier(mark=mark)
  model.add(Input(sample_shape=config.sample_shape))

  def ConvBNReLU(filters, strength=1.0, bn=True):
    model.add(Conv2D(filters=filters, kernel_size=5, padding='same',
                     kernel_regularizer=regularizers.L2(strength=strength)))
    if bn: model.add(BatchNorm())
    model.add(Activation('relu'))

  # Conv layers
  reg = 1e-5
  ConvBNReLU(32, reg)
  model.add(Dropout(0.5))
  ConvBNReLU(32, reg)
  model.add(MaxPool2D(2, 2, 'same'))
  ConvBNReLU(64, reg)
  model.add(Dropout(0.5))
  ConvBNReLU(64, reg)
  model.add(MaxPool2D(2, 2, 'same'))
  ConvBNReLU(128, reg)

  # FC layers
  model.add(Flatten())
  model.add(Linear(256))
  # model.add(BatchNorm())
  model.add(Activation('relu'))
  model.add(Linear(256))
  # model.add(BatchNorm())
  model.add(Activation('relu'))
  model.add(Linear(config.y_dim))

  # Build model
  model.build(optimizer=tf.train.AdamOptimizer(learning_rate=1e-4))
  return model

def _add_fc_relu_layers(nn, hidden_dims, activation='relu', strength=0.0):
  assert isinstance(nn, Predictor)
  assert isinstance(hidden_dims, (tuple, list))
  for dim in hidden_dims:
    nn.add(Linear(output_dim=dim, weight_regularizer='l2', strength=strength))
    nn.add(Activation(activation))

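# This is the helper that mlp_00 above invokes (presumably via a module alias
# `lc`): with memory_depth=80 it receives hidden_dims = [160] * 4 and
# activation='lrelu', appending four Linear(160) + Activation('lrelu') pairs
# to the predictor.
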
def tlp(memory_depth, hidden_dim, mark='tlp'):
  # Hyper-parameters
  learning_rate = 0.001

  # Initiate a predictor
  model = NeuralNet(memory_depth, mark=mark)
  nn = model.nn
  assert isinstance(nn, Predictor)

  # Add layers
  nn.add(Input([memory_depth]))
  nn.add(Linear(output_dim=hidden_dim))
  nn.add(Activation('sigmoid'))
  nn.add(Linear(output_dim=1, use_bias=False))

  # Build model
  model.default_build(learning_rate=learning_rate)
  return model

def output_and_build(model, th):
  assert isinstance(model, Predictor)
  assert isinstance(th, Config)
  # Add output layer
  model.add(Linear(output_dim=th.output_dim, use_bias=th.bias_out_units))
  model.build(metric='mse', loss='mse', last_only=True)

def dcgan(mark):
  # Initiate model
  model = GAN(z_dim=100, sample_shape=[28, 28, 1], mark=mark, classes=10)

  # Define generator
  model.G.add(Linear(output_dim=7 * 7 * 128))
  model.G.add(Reshape(shape=[7, 7, 128]))
  model.G.add(BatchNorm())
  model.G.add(Activation.ReLU())
  model.G.add(Deconv2D(filters=128, kernel_size=5, strides=2, padding='same'))
  model.G.add(BatchNorm())
  model.G.add(Activation.ReLU())
  model.G.add(Deconv2D(filters=1, kernel_size=5, strides=2, padding='same'))
  model.G.add(Activation('sigmoid'))
  # model.G.add(Activation('tanh'))
  # model.G.add(Rescale(from_scale=[-1., 1.], to_scale=[0., 1.]))

  # Define discriminator
  # model.D.add(Rescale(from_scale=[0., 1.], to_scale=[-1., 1.]))
  model.D.add(Conv2D(filters=128, kernel_size=5, strides=2, padding='same'))
  model.D.add(Activation.LeakyReLU())
  model.D.add(Conv2D(filters=128, kernel_size=5, strides=2, padding='same'))
  model.D.add(BatchNorm())
  model.D.add(Activation.LeakyReLU())
  model.D.add(Reshape(shape=[7 * 7 * 128]))
  model.D.add(Linear(output_dim=1))
  model.D.add(Activation('sigmoid'))

  # Build model
  optimizer = tf.train.AdamOptimizer(learning_rate=0.0002, beta1=0.5)
  model.build(loss=pedia.cross_entropy, G_optimizer=optimizer,
              D_optimizer=optimizer)
  return model

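# Shape check for dcgan above: the latent code is projected to 7*7*128 and
# reshaped to (7, 7, 128); each stride-2 Deconv2D doubles the spatial size,
# 7 -> 14 -> 28, matching sample_shape=[28, 28, 1]. The discriminator mirrors
# this: two stride-2 convs take 28 -> 14 -> 7 before the Reshape to 7*7*128.
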
def mlp00(mark):
  # Define model
  model = TDPlayer(mark=mark)
  model.add(Input(sample_shape=[15, 15]))
  model.add(Flatten())
  model.add(Linear(225))
  model.add(Activation.ReLU())
  model.add(Linear(225))
  model.add(Activation.ReLU())
  model.add(Linear(1))
  model.add(Activation('sigmoid'))

  # Build model
  model.build()
  return model

def output_and_build(model, th):
  assert isinstance(model, Classifier)
  assert isinstance(th, Config)
  # Add output layer
  model.add(Linear(output_dim=th.output_dim, use_bias=th.bias_out_units))
  model.add(Activation('softmax'))
  model.build(metric='accuracy', batch_metric='accuracy', last_only=True)

def svn(memory_depth, order, hidden_dim, mark='svn'):
  # Hyper-parameters
  learning_rate = 0.001

  # Initiate a predictor
  model = NeuralNet(memory_depth, mark=mark)
  nn = model.nn
  assert isinstance(nn, Predictor)

  # Add layers
  nn.add(Input([memory_depth]))
  nn.add(Linear(output_dim=hidden_dim))
  nn.add(Polynomial(order=order))
  nn.add(Linear(output_dim=1, use_bias=False))

  # Build model
  # optimizer = tf.train.GradientDescentOptimizer(learning_rate)
  optimizer = tf.train.AdamOptimizer(learning_rate)
  model.default_build(optimizer=optimizer, learning_rate=learning_rate)
  return model

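# Hypothetical usage sketch (values illustrative, not from the source):
# a degree-3 Polynomial on an 80-unit hidden layer over 40 memory taps:
# model = svn(memory_depth=40, order=3, hidden_dim=80)
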
def mlp_00(th):
  assert isinstance(th, NlsHub)
  # Initiate a predictor
  model = NeuralNet(th.memory_depth, mark=th.mark, nn_class=Predictor)
  nn = model.nn
  assert isinstance(nn, Predictor)

  # Add layers
  nn.add(Input([th.memory_depth]))
  for _ in range(th.num_blocks):
    nn.add(Linear(output_dim=th.hidden_dim, weight_regularizer=th.regularizer,
                  strength=th.reg_strength))
    nn.add(Activation(th.actype1))
  nn.add(Linear(output_dim=1))

  # Build model
  model.default_build(th.learning_rate)

  # Return model
  return model