def get_network_mse_1(x_all, y_all, num_of_neurons=(2, 25, 2), activation='relu',
                      lr=0.001, momentum_coef=0.0, weight_decay=0.0, p_dropout=0.0,
                      num_of_epochs=100, val_split=0.2, verbose=0):
    """ model with 1 hidden layer, loss is MSE """
    mse = LossMSE()
    model = Sequential()
    # use the activation argument here rather than a hard-coded 'relu'
    model.add(Linear(out=num_of_neurons[1], input_size=num_of_neurons[0],
                     activation=activation))
    model.add(Dropout(prob=p_dropout))
    model.add(Linear(out=num_of_neurons[2], activation=activation))
    model.loss = mse
    sgd = SGD(lr, momentum_coef, weight_decay=weight_decay)
    report = sgd.train(model, x_all, y_all, num_of_epochs,
                       val_split=val_split, verbose=verbose)
    return model, report
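# Hypothetical usage sketch for the factory above, on a two-feature toy
# problem matching the default num_of_neurons=(2, 25, 2). The tensor type and
# label encoding are assumptions -- use whatever LossMSE and SGD.train expect
# in this framework.
import torch

x_all = torch.rand(1000, 2)                 # 1000 samples, 2 features
labels = (x_all.sum(dim=1) > 1).long()      # arbitrary binary targets
y_all = torch.eye(2)[labels]                # one-hot targets for the MSE loss

model, report = get_network_mse_1(x_all, y_all, lr=0.001,
                                  num_of_epochs=100, verbose=1)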
def nn():
    return Sequential([
        Convolution(output_depth=FLAGS.output_dim, input_depth=1,
                    batch_size=FLAGS.batch_size, input_dim=FLAGS.image_dim,
                    act='relu', stride_size=1, pad='VALID'),
        AvgPool(),
        Convolution(output_depth=25, stride_size=1, act='relu', pad='VALID'),
        AvgPool(),
        Convolution(output_depth=25, stride_size=1, act='relu', pad='VALID'),
        AvgPool(),
        Convolution(kernel_size=4, output_depth=100, stride_size=1, act='relu', pad='VALID'),
        AvgPool(),
        Convolution(kernel_size=1, output_depth=FLAGS.output_dim, stride_size=1, pad='VALID'),
        Softmax()
    ])
def nn():
    return Sequential([
        Linear(input_dim=784, output_dim=1296, act='relu',
               batch_size=FLAGS.batch_size, keep_prob=0.8),
        Linear(1296, act='relu'),
        Linear(1296, act='relu'),
        Linear(10, act='relu'),
        # Softmax()
    ])
def discriminator():
    return Sequential([
        Convolution(input_depth=1, output_depth=32, act='tanh',
                    batch_size=FLAGS.batch_size, input_dim=28),
        MaxPool(),
        Convolution(output_depth=64, act='tanh'),
        MaxPool(),
        Linear(1)
    ])
def layers(x):
    # Define the layers of your network here (`x` is unused in this definition)
    return Sequential([
        Linear(input_dim=784, output_dim=1296, act='relu', batch_size=FLAGS.batch_size),
        Linear(1296, act='relu'),
        Linear(1296, act='relu'),
        Linear(10),
        Softmax()
    ])
def get_network_ce_1(x_all, y_all, num_of_neurons=(2, 25, 2), activation='relu',
                     lr=0.1, momentum_coef=0.0, weight_decay=0.0, p_dropout=0.0,
                     num_of_epochs=100, val_split=0.2, verbose=0):
    """ 1 hidden layer, CE """
    ce = LossCrossEntropy()
    model = Sequential()
    model.add(Linear(out=num_of_neurons[1], input_size=num_of_neurons[0],
                     activation=activation))
    model.add(Dropout(prob=p_dropout))
    model.add(Linear(out=num_of_neurons[2], activation='softmax'))
    model.loss = ce
    # initialize the SGD optimizer with the given learning rate, momentum
    # coefficient and weight-decay parameter
    sgd = SGD(lr, momentum_coef, weight_decay=weight_decay)
    # train the model and collect the training report
    report = sgd.train(model, x_all, y_all, num_of_epochs,
                       val_split=val_split, verbose=verbose)
    return model, report
def nn():
    return Sequential([
        Linear(input_dim=166, output_dim=256, act='relu', batch_size=FLAGS.batch_size),
        Linear(256, act='relu'),
        Linear(128, act='relu'),
        Linear(64, act='relu'),
        Linear(64, act='relu'),
        Linear(32, act='relu'),
        Linear(16, act='relu'),
        Linear(8, act='relu'),
        Linear(3, act='relu'),
        Softmax()
    ])
def nn():
    return Sequential([
        # block 1
        Convolution(kernel_size=3, output_depth=64, input_depth=3,
                    batch_size=FLAGS.batch_size, input_dim=3, act='relu',
                    stride_size=1, pad='SAME', first=True),
        Convolution(kernel_size=3, output_depth=64, input_depth=64,
                    batch_size=FLAGS.batch_size, act='relu', stride_size=1, pad='SAME'),
        MaxPool(),
        # block 2
        Convolution(kernel_size=3, output_depth=128, input_depth=64,
                    batch_size=FLAGS.batch_size, act='relu', stride_size=1, pad='SAME'),
        Convolution(kernel_size=3, output_depth=128, input_depth=128,
                    batch_size=FLAGS.batch_size, act='relu', stride_size=1, pad='SAME'),
        MaxPool(),
        # block 3
        Convolution(kernel_size=3, output_depth=256, input_depth=128,
                    batch_size=FLAGS.batch_size, act='relu', stride_size=1, pad='SAME'),
        Convolution(kernel_size=3, output_depth=256, input_depth=256,
                    batch_size=FLAGS.batch_size, act='relu', stride_size=1, pad='SAME'),
        Convolution(kernel_size=3, output_depth=256, input_depth=256,
                    batch_size=FLAGS.batch_size, act='relu', stride_size=1, pad='SAME'),
        MaxPool(),
        # block 4
        Convolution(kernel_size=3, output_depth=512, input_depth=256,
                    batch_size=FLAGS.batch_size, act='relu', stride_size=1, pad='SAME'),
        Convolution(kernel_size=3, output_depth=512, input_depth=512,
                    batch_size=FLAGS.batch_size, act='relu', stride_size=1, pad='SAME'),
        Convolution(kernel_size=3, output_depth=512, input_depth=512,
                    batch_size=FLAGS.batch_size, act='relu', stride_size=1, pad='SAME'),
        MaxPool(),
        # block 5
        Convolution(kernel_size=3, output_depth=512, input_depth=512,
                    batch_size=FLAGS.batch_size, act='relu', stride_size=1, pad='SAME'),
        Convolution(kernel_size=3, output_depth=512, input_depth=512,
                    batch_size=FLAGS.batch_size, act='relu', stride_size=1, pad='SAME'),
        Convolution(kernel_size=3, output_depth=512, input_depth=512,
                    batch_size=FLAGS.batch_size, act='relu', stride_size=1, pad='SAME'),
        MaxPool(),
        # fully-connected head expressed as convolutions
        Convolution(kernel_size=7, output_depth=4096, stride_size=1, act='relu', pad='VALID'),
        Convolution(kernel_size=1, output_depth=4096, stride_size=1, act='relu', pad='VALID'),
        Convolution(kernel_size=1, output_depth=1000, stride_size=1, final=True, pad='VALID'),
    ])
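# This is the VGG-16 layout with the three fully-connected layers recast as
# convolutions (one 7x7 'VALID' conv, then two 1x1 convs). A minimal sketch
# of the spatial arithmetic, assuming the standard 224x224 VGG input that the
# 7x7 head implies (224 / 2^5 = 7):
def valid_out(size, kernel, stride=1):
    # output size of a 'VALID' convolution
    return (size - kernel) // stride + 1

size = 224
for _ in range(5):            # five conv blocks, each ending in a MaxPool
    size //= 2                # 224 -> 112 -> 56 -> 28 -> 14 -> 7
size = valid_out(size, 7)     # 7x7 'VALID' conv -> 1x1
size = valid_out(size, 1)     # the 1x1 convs keep 1x1
print(size)                   # 1: the head ends at 1x1x1000 class scores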
def generator():
    return Sequential([
        Linear(input_dim=1024, output_dim=7 * 7 * 128, act='tanh',
               batch_size=FLAGS.batch_size),
        Convolution(input_dim=7, input_depth=128, output_depth=32, act='tanh'),    # 4x4
        Upconvolution(output_depth=128, kernel_size=3),                            # 8x8
        Upconvolution(output_depth=256, kernel_size=5, stride_size=1, act='tanh',
                      pad='VALID'),                                                # 12x12
        Upconvolution(output_depth=32, kernel_size=3, act='tanh'),                 # 24x24
        Upconvolution(output_depth=1, kernel_size=5, stride_size=1, act='tanh',
                      pad='VALID'),                                                # 28x28
    ])
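# The per-layer size comments above follow standard transposed-convolution
# arithmetic. A sketch of that bookkeeping, assuming the framework defaults
# to stride 2 with 'SAME' padding (out = in * stride) and uses
# out = (in - 1) * stride + kernel for 'VALID':
def upconv_out(size, kernel, stride=2, pad='SAME'):
    if pad == 'SAME':
        return size * stride
    return (size - 1) * stride + kernel

size = 4                                            # after the 7x7 -> 4x4 conv
size = upconv_out(size, 3)                          # 8
size = upconv_out(size, 5, stride=1, pad='VALID')   # 12
size = upconv_out(size, 3)                          # 24
size = upconv_out(size, 5, stride=1, pad='VALID')   # 28
print(size)                                         # 28x28 MNIST-sized output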
def nn(phase):
    return Sequential([
        Convolution3D(kernel_size=3, output_depth=32, input_depth=1,
                      batch_size=FLAGS.batch_size, input_dim=32, act='lrelu',
                      phase=phase, stride_size=1, pad='SAME'),
        Convolution3D(kernel_size=3, output_depth=32, input_depth=32,
                      batch_size=FLAGS.batch_size, act='lrelu', phase=phase,
                      stride_size=1, pad='SAME'),
        MaxPool3D(),
        Convolution3D(kernel_size=3, output_depth=64, input_depth=32,
                      batch_size=FLAGS.batch_size, act='lrelu', phase=phase,
                      stride_size=1, pad='SAME'),
        Convolution3D(kernel_size=3, output_depth=64, input_depth=64,
                      batch_size=FLAGS.batch_size, act='lrelu', phase=phase,
                      stride_size=1, pad='SAME'),
        MaxPool3D(),
        Convolution3D(kernel_size=3, output_depth=128, input_depth=64,
                      batch_size=FLAGS.batch_size, act='lrelu', phase=phase,
                      stride_size=1, pad='SAME'),
        # input_depth matches the 128 channels produced by the previous layer
        Convolution3D(kernel_size=3, output_depth=128, input_depth=128,
                      batch_size=FLAGS.batch_size, act='lrelu', phase=phase,
                      stride_size=1, pad='SAME'),
        MaxPool3D(),
        Convolution3D(kernel_size=4, output_depth=128, stride_size=1, act='lrelu',
                      phase=phase, pad='VALID'),
        Convolution3D(kernel_size=1, output_depth=2, stride_size=1, phase=phase,
                      final=True, pad='VALID')
    ])
def next_batch(train):  # header reconstructed; the name is a placeholder
    if train:
        xs, ys = mnist.train.next_batch(batch_size)
    else:
        xs, ys = mnist.test.next_batch(batch_size)
    # rescale inputs from [0, 1] to [-1, 1]
    return (2 * xs) - 1, ys

mnist = input_data.read_data_sets('data', one_hot=True)

with tf.Session() as sess:
    # GRAPH
    net = Sequential([
        Linear(input_dim=784, output_dim=1200, act='relu',
               batch_size=batch_size, keep_prob=dropout),
        Linear(500, act='relu', keep_prob=dropout),
        Linear(10, act='linear', keep_prob=dropout),
        Softmax()
    ])
    x = tf.placeholder(tf.float32, [batch_size, 784], name='x-input')
    y_labels = tf.placeholder(tf.float32, [batch_size, 10], name='y-input')
    y_pred = net.forward(x)
    correct_prediction = tf.equal(tf.argmax(y_labels, axis=1),
                                  tf.argmax(y_pred, axis=1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    trainer = net.fit(output=y_pred,
def nn(phase):
    net = Sequential([
        Convolution(kernel_size=3, output_depth=64, input_depth=3, batch_size=32,
                    input_dim=3, act='relu', stride_size=1, pad='SAME',
                    batch_norm=True, phase=phase),
        Convolution(kernel_size=3, output_depth=64, input_depth=64, batch_size=32,
                    act='relu', stride_size=1, pad='SAME', batch_norm=True, phase=phase),
        MaxPool(),
        Convolution(kernel_size=3, output_depth=128, input_depth=64, batch_size=32,
                    act='relu', stride_size=1, pad='SAME', batch_norm=True, phase=phase),
        Convolution(kernel_size=3, output_depth=128, input_depth=128, batch_size=32,
                    act='relu', stride_size=1, pad='SAME', batch_norm=True, phase=phase),
        MaxPool(),
        Convolution(kernel_size=3, output_depth=256, input_depth=128, batch_size=32,
                    act='relu', stride_size=1, pad='SAME', batch_norm=True, phase=phase),
        Convolution(kernel_size=3, output_depth=256, input_depth=256, batch_size=32,
                    act='relu', stride_size=1, pad='SAME', batch_norm=True, phase=phase),
        Convolution(kernel_size=3, output_depth=256, input_depth=256, batch_size=32,
                    act='relu', stride_size=1, pad='SAME', batch_norm=True, phase=phase),
        MaxPool(),
        Convolution(kernel_size=4, output_depth=512, stride_size=1, act='relu',
                    pad='VALID', batch_norm=True, phase=phase),
        Convolution(kernel_size=1, output_depth=512, stride_size=1, act='relu',
                    pad='VALID', batch_norm=True, phase=phase),
        Convolution(kernel_size=1, output_depth=10, stride_size=1, act='linear',
                    pad='VALID', batch_norm=True, phase=phase),
        Softmax(),
    ])
    return net
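# Minimal usage sketch for the batch-normalized net above: `phase` is assumed
# to be a boolean tensor that switches batch norm between batch statistics
# (training) and moving averages (inference). Placeholder names are
# illustrative only.
import tensorflow as tf

phase = tf.placeholder(tf.bool, name='phase')
net = nn(phase)

x = tf.placeholder(tf.float32, [32, 32, 32, 3], name='x-input')  # CIFAR-sized batch
y_pred = net.forward(x)

# feed phase=True during training and phase=False at evaluation time, e.g.
# sess.run(train_op, feed_dict={x: batch_x, phase: True})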
def get_network_ce_4(x_all, y_all, num_of_neurons=(2, 25, 25, 25, 2), activation='relu',
                     lr=0.1, momentum_coef=0.0, weight_decay=0.0, p_dropout=0.0,
                     num_of_epochs=100, val_split=0.2, verbose=0):
    """ model with 3 hidden layers, loss is CE """
    ce = LossCrossEntropy()
    model = Sequential()
    model.add(Linear(out=num_of_neurons[1], input_size=num_of_neurons[0],
                     activation=activation))
    model.add(Dropout(prob=p_dropout))
    model.add(Linear(out=num_of_neurons[2], activation=activation))
    model.add(Dropout(prob=p_dropout))
    model.add(Linear(out=num_of_neurons[3], activation=activation))
    model.add(Dropout(prob=p_dropout))
    model.add(Linear(out=num_of_neurons[4], activation='softmax'))
    model.loss = ce
    sgd = SGD(lr, momentum_coef, weight_decay=weight_decay)
    report = sgd.train(model, x_all, y_all, num_of_epochs,
                       val_split=val_split, verbose=verbose)
    return model, report
def get_network(x_all, y_all, num_of_hidden_layers=3, loss='ce',
                num_of_neurons=(2, 25, 25, 25, 2), activation='relu', lr=0.1,
                momentum_coef=0.0, weight_decay=0.0, p_dropout=0.0,
                num_of_epochs=100, val_split=0.2, verbose=0):
    """
    Creates a model with the given parameters.

    x_all - features
    y_all - targets
    num_of_hidden_layers - int, number of hidden layers in the model
    loss - 'ce' for cross-entropy, 'mse' for mean squared error
    num_of_neurons - tuple of ints of size num_of_hidden_layers + 2; the first
        element is the number of features in x_all, the last is the number of
        possible targets
    activation - 'relu' for ReLU, 'tanh' for tanh
    lr - float, learning rate
    momentum_coef - float in (0, 1), momentum coefficient
    weight_decay - float, L2-regularization parameter
    p_dropout - float in [0, 1), dropout probability
    num_of_epochs - int, number of epochs
    val_split - float in [0, 1), ratio of the validation set
    verbose - 0 or 1, whether to print results
    """
    # set the loss and the activation of the output layer
    if loss == 'ce':
        loss = LossCrossEntropy()
        last_activation = 'softmax'
    else:
        loss = LossMSE()
        last_activation = activation

    # initialize an empty Sequential as the model
    model = Sequential()

    # add linear layers with the given activation, each followed by a dropout
    # layer with the given p_dropout
    if num_of_hidden_layers > 0:
        model.add(Linear(out=num_of_neurons[1], input_size=num_of_neurons[0],
                         activation=activation))
        model.add(Dropout(prob=p_dropout))
        for i in range(num_of_hidden_layers - 1):
            model.add(Linear(out=num_of_neurons[i + 2], activation=activation))
            model.add(Dropout(prob=p_dropout))
        model.add(Linear(out=num_of_neurons[-1], activation=last_activation))
    else:
        model.add(Linear(out=num_of_neurons[-1], input_size=num_of_neurons[0],
                         activation=last_activation))

    # set the loss of the model, then train and return the report
    model.loss = loss
    sgd = SGD(lr, momentum_coef, weight_decay=weight_decay)
    report = sgd.train(model, x_all, y_all, num_of_epochs,
                       val_split=val_split, verbose=verbose)
    return model, report
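# Example call, equivalent to the 3-hidden-layer cross-entropy configuration
# hard-coded above (x_all / y_all are whatever tensors SGD.train expects):
model, report = get_network(x_all, y_all,
                            num_of_hidden_layers=3, loss='ce',
                            num_of_neurons=(2, 25, 25, 25, 2),
                            activation='relu', lr=0.1, p_dropout=0.1,
                            num_of_epochs=100, val_split=0.2, verbose=1)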
def main():
    global EPOCHS, BATCH_SIZE, LEARNING_RATE

    name = ""
    print("Train? (y for train, n for test)")
    choice = input()
    train_flag = True
    if choice == 'n' or choice == 'N':
        df = pd.read_csv("data/out-test.csv")
        BATCH_SIZE = df.shape[0]
        EPOCHS = 1
        train_flag = False
        name = input("Enter model file name: ")
    else:
        df = pd.read_csv("data/out-train.csv")

    cols = df.columns.values
    cols = np.delete(cols, [1])          # drop column 1 from the feature columns
    train_X = df.loc[:, cols].values
    train_y = df["decile_score"].values
    y_train_ = train_y
    train_y = keras.utils.np_utils.to_categorical(train_y)
    print(train_X.shape)
    print(train_y.shape)

    # Layer sizes
    x_size = train_X.shape[1]            # number of input features
    y_size = train_y.shape[1]            # number of classes

    # Symbols
    X = tf.placeholder("float", shape=[None, x_size])
    y = tf.placeholder("float", shape=[None, y_size])

    net = Sequential([
        Linear(input_dim=166, output_dim=256, act='relu', batch_size=BATCH_SIZE),
        Linear(256, act='relu'),
        Linear(128, act='relu'),
        Linear(64, act='relu'),
        Linear(64, act='relu'),
        Linear(32, act='relu'),
        Linear(16, act='relu'),
        Linear(8, act='relu'),
        Linear(3, act='relu'),
        Softmax()
    ])

    output = net.forward(tf.convert_to_tensor(X))
    trainer = net.fit(output, y, loss='softmax_crossentropy',
                      optimizer='adam', opt_params=[LEARNING_RATE])
def nn():
    return Sequential([
        Convolution(output_depth=36, input_depth=1, batch_size=FLAGS.batch_size,
                    input_dim=25, act='relu', stride_size=1, pad='VALID'),
        Convolution(output_depth=25, kernel_size=5, stride_size=3, act='relu', pad='VALID'),
        Convolution(output_depth=16, kernel_size=5, stride_size=2, act='relu', pad='VALID'),
        Convolution(output_depth=2, kernel_size=1, stride_size=1, act='relu', pad='VALID')
    ])