def add_dropout(self, dropout_keep_prob=0.8):
    '''Add/append a dropout layer.

    Args:
        dropout_keep_prob: probability that a neuron is not dropped
            during training
    '''
    self.layers.append(Dropout(dropout_keep_prob))
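# The Dropout class appended above is defined elsewhere in the project. As a
# reference, here is a minimal sketch of an inverted-dropout layer under the
# same keep-probability convention; the class name, forward() signature, and
# training flag are illustrative assumptions, not the project's actual API.
import numpy as np


class DropoutSketch:
    def __init__(self, keep_prob=0.8):
        self.keep_prob = keep_prob

    def forward(self, x, training=True):
        if not training:
            # Inverted dropout needs no rescaling at inference time.
            return x
        # Keep each unit with probability keep_prob and rescale the
        # survivors so the expected activation is unchanged.
        mask = np.random.rand(*x.shape) < self.keep_prob
        return x * mask / self.keep_prob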
def __init__(self, layers: list, cost: Cost, optim, regularizer=None,
             gamma=0, dropout=None):
    print("Initialized model")
    self.weights = []
    self.bias = []
    self.layers = layers
    self.depth = len(layers)
    self.initialize_layers()
    self._optim = optim(self, cost, regularizer, gamma)
    self.input_layer = None
    self.output_layer = None
    # build the graph by connecting each layer to its successor
    for i in range(0, self.depth - 1):
        self.layers[i].connect(self.layers[i + 1])
    self._parameters = ModelParameters(self)
    self._forward_graph = NNGraphForward(self)
    self._backward_graph = NNGraphBackward(self)
    if dropout:
        self.dropout = Dropout(self, dropout)
    else:
        self.dropout = None
def _setup_layer(self, input_: ndarray) -> None:
    np.random.seed(self.seed)
    num_in = input_.shape[1]

    if self.weight_init == "glorot":
        scale = 2 / (num_in + self.neurons)
    else:
        scale = 1.0

    # weights
    self.params = []
    self.params.append(
        np.random.normal(loc=0, scale=scale, size=(num_in, self.neurons)))

    # bias
    self.params.append(
        np.random.normal(loc=0, scale=scale, size=(1, self.neurons)))

    self.operations = [
        WeightMultiply(self.params[0]),
        BiasAdd(self.params[1]),
        self.activation
    ]

    if self.dropout < 1.0:
        self.operations.append(Dropout(self.dropout))

    return None
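# Dropout here is one more operation in the layer's pipeline, alongside
# WeightMultiply and BiasAdd. That operation class is not shown; a minimal
# sketch of what an operation-style dropout could look like, assuming the
# framework calls _output() on the forward pass and _input_grad() on the
# backward pass (the method names follow that assumed convention):
import numpy as np


class DropoutOpSketch:
    def __init__(self, keep_prob: float):
        self.keep_prob = keep_prob
        self.mask = None

    def _output(self, input_: np.ndarray) -> np.ndarray:
        # Sample the mask once per forward pass and cache it for backward.
        self.mask = np.random.binomial(1, self.keep_prob, size=input_.shape)
        return input_ * self.mask

    def _input_grad(self, output_grad: np.ndarray) -> np.ndarray:
        # Gradients flow only through the units that were kept.
        return output_grad * self.mask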
def _setup_layer(self, input_: ndarray) -> None:
    self.params = []
    in_channels = input_.shape[1]

    if self.weight_init == "glorot":
        scale = 2 / (in_channels + self.out_channels)
    else:
        scale = 1.0

    conv_param = np.random.normal(
        loc=0,
        scale=scale,
        size=(in_channels,
              self.out_channels,
              self.param_size,
              self.param_size))
    self.params.append(conv_param)

    self.operations = []
    self.operations.append(Conv2D_Op(conv_param))
    self.operations.append(self.activation)

    if self.flatten:
        self.operations.append(Flatten())
    if self.dropout < 1.0:
        self.operations.append(Dropout(self.dropout))

    return None
def Dropout(x, args, name):
    """Create a dropout layer.

    :param x: the placeholder for the tensor
    :param args: the arguments for the dropout (keep_prob)
    :param name: a label for the operation
    :return: a "dropped" tensor
    """
    from dropout import Dropout
    return Dropout(x, args, name)
def relu_activation(X, Y, val_X, val_Y):
    """ReLU"""
    model = Model(learning_rate=0.001,
                  batch_size=32,
                  epochs=1000,
                  optimizer=None)
    model.add(Dropout(0.5))
    model.add(Dense(1024, 512, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(512, 64, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(64, 1))
    print("Begin Training")
    model.train(X, Y, val_X, val_Y)
    model.save_history("experiments/relu.csv")
    print("The CSV file is saved in the experiments folder. "
          "You can plot the graph using plot.py")
def cnn_adam(X, Y, val_X, val_Y):
    """CNN - Adam"""
    model = Model(learning_rate=0.001,
                  batch_size=32,
                  epochs=200,
                  optimizer=Adam())
    model.add(Conv2D(2, (3, 3), activation='tanh'))
    model.add(Maxpool((2, 2), stride=2))  # 16x16
    model.add(Dropout(0.5))
    model.add(Conv2D(4, (3, 3), activation='tanh'))
    model.add(Maxpool((2, 2), stride=2))  # 8x8
    model.add(Dropout(0.5))
    model.add(Flatten())
    model.add(Dense(256, 32, activation='tanh'))
    model.add(Dense(32, 1))
    print("Begin Training")
    model.train(X, Y, val_X, val_Y)
    model.save_history("experiments/cnn-adam.csv")
def ann_layers(X, Y, val_X, val_Y):
    """Extra layers with sigmoid"""
    model = Model(learning_rate=0.001,
                  batch_size=32,
                  epochs=1000,
                  optimizer=None)
    model.add(Dropout(0.5))
    model.add(Dense(1024, 512, activation='sigmoid'))
    model.add(Dropout(0.5))
    model.add(Dense(512, 256, activation='sigmoid'))
    model.add(Dropout(0.5))
    model.add(Dense(256, 32, activation='sigmoid'))
    model.add(Dropout(0.5))
    model.add(Dense(32, 1))
    print("Begin Training")
    model.train(X, Y, val_X, val_Y)
    model.save_history("experiments/ann-1024-512-256-32-1.csv")
    print("The CSV file is saved in the experiments folder. "
          "You can plot the graph using plot.py")
def experiment_three(X, Y, val_X, val_Y):
    """A plot of sum-of-squares error on the training and validation sets
    as a function of training iterations (for 1000 epochs), with a learning
    rate of 0.001 and a dropout probability of 0.5 for the first, second,
    and third layers.
    """
    model = Model(learning_rate=0.001,
                  batch_size=32,
                  epochs=1000,
                  optimizer=None)
    model.add(Dropout(0.5))
    model.add(Dense(1024, 512, activation='sigmoid'))
    model.add(Dropout(0.5))
    model.add(Dense(512, 64, activation='sigmoid'))
    model.add(Dropout(0.5))
    model.add(Dense(64, 1))
    print("Begin Training")
    model.train(X, Y, val_X, val_Y)
    model.save_history("experiments/experiment_three.csv")
    print("The CSV file is saved in the experiments folder. "
          "You can plot the graph using plot.py")
def train_single(self, input_vector, target_vector):
    """input_vector and target_vector can be tuple, list or ndarray."""
    input_vector = np.array(input_vector, ndmin=2).T
    target_vector = np.array(target_vector, ndmin=2).T

    # forward pass, applying a dropout mask to each layer's activations
    output_vector1 = np.dot(self.weights_in_hidden, input_vector)
    output_hidden = Activation.reLU(output_vector1)
    output_hidden *= Dropout.get_mask(output_vector1)

    output_vector2 = np.dot(self.weights_hidden_output, output_hidden)
    output_network = Activation.reLU(output_vector2)
    output_network *= Dropout.get_mask(output_vector2)

    output_errors = target_vector - output_network

    # update the hidden-to-output weights
    tmp = output_errors * Derivative.reLU(output_network)
    tmp = self.learning_rate * np.dot(tmp, output_hidden.T)
    self.weights_hidden_output += tmp

    # back-propagate the errors to the hidden layer
    hidden_errors = np.dot(self.weights_hidden_output.T, output_errors)

    # update the input-to-hidden weights
    tmp = hidden_errors * Derivative.reLU(output_hidden)
    self.weights_in_hidden += self.learning_rate * np.dot(
        tmp, input_vector.T)
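# Dropout.get_mask above is not shown either. A plausible minimal sketch,
# assuming an inverted-dropout mask with a fixed keep probability (the class
# name and the 0.8 below are illustrative, not taken from the source):
import numpy as np


class DropoutMaskSketch:
    keep_prob = 0.8  # assumed value for illustration

    @staticmethod
    def get_mask(activations):
        # Binary mask with the same shape as the pre-activations, rescaled
        # so the expected magnitude of the masked activations is preserved.
        mask = np.random.rand(*activations.shape) < DropoutMaskSketch.keep_prob
        return mask / DropoutMaskSketch.keep_prob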
import sys

import numpy
from numpy.random import *

from inputer import Inputer
from three_nn import Three_nn
from back import Back
from hyojun_in import Hyojun_in
from sigmoid import Sigmoid
from relu import Relu
from batchnorm import Batchnorm
from dropout import Dropout
from adam import Adam
from convolutional import Convolutional

inclass = Inputer()
threeclass = Three_nn()
backclass = Back()
batchmoment = 0.9
dropclass = Dropout()

####
while True:
    iden_train = input("identification or training? i/t > ")
    if iden_train == "i":
        itbool = True
        break
    elif iden_train == "t":
        itbool = False
        break
    else:
        print("illegal input from keyboard.")
####
def foo(mod, op, d):
    if op[0] == "linear":
        xx = Linear(d)
    # rnncell, lstmcell, grucell
    elif (mod[0] in ["LSTMCell", "GRUCell"]) and (op[0] == "forward"):
        xx = RNNCell(d)
    elif op[0] in ["conv1d", "conv2d"]:
        xx = Conv(d)
    elif op[0] in Pointwise.ops:
        xx = Pointwise(d)
    elif op[0] in Convert.ops:
        xx = Convert(d)
    elif op[0] in ["__matmul__", "matmul"]:
        xx = Matmul(d)
    elif op[0] == "embedding":
        xx = Embedding(d)
    # reduction
    elif op[0] == "sum":
        xx = Sum(d)
    elif op[0] == "mean":
        xx = Mean(d)
    elif op[0] == "norm":
        xx = Norm(d)
    elif op[0] == "dropout":
        xx = Dropout(d)
    # index, slice, join, mutate
    elif op[0] == "cat":
        xx = Cat(d)
    elif op[0] == "reshape":
        xx = Reshape(d)
    elif op[0] == "masked_scatter_":
        xx = MaskedScatter(d)
    elif op[0] == "gather":
        xx = Gather(d)
    elif op[0] == "nonzero":
        xx = Nonzero(d)
    elif op[0] == "index_select":
        xx = IndexSelect(d)
    elif op[0] == "masked_select":
        xx = MaskedSelect(d)
    # blas
    elif op[0] in ["addmm", "addmm_"]:
        xx = Addmm(d)
    elif op[0] == "mm":
        xx = Mm(d)
    elif op[0] == "bmm":
        xx = Bmm(d)
    # softmax
    elif op[0] == "softmax":
        xx = Softmax(d)
    elif op[0] == "log_softmax":
        xx = LogSoftmax(d)
    # loss
    elif op[0] == "mse_loss":
        xx = MSELoss(d)
    # optimizers
    elif op[0] == "adam":
        xx = Adam(d)
    # normalization
    elif op[0] == "batch_norm":
        xx = BatchNorm(d)
    # random
    elif op[0] == "randperm":
        xx = RandPerm(d)
    # misc
    elif op[0] == "copy_":
        xx = Copy(d)
    elif op[0] == "clone":
        xx = Clone(d)
    elif op[0] == "contiguous":
        xx = Contiguous(d)
    elif op[0] == "any":
        xx = Any(d)
    elif op[0] in Activation.ops:
        xx = Activation(d)
    elif op[0] == "to":
        xx = Convert(d)
    else:
        xx = Foo(d)
    return xx
class TestDropout:
    def setUp(self):
        self.d = Dropout()
        self.raw = {}
        self.raw['results'] = {
            'completed': [[u'desktop', u'14452'], [u'mobile', u'4073'],
                          [u'tablet', u'4287']],
            'not_completed': [[u'desktop', u'30864'], [u'mobile', u'11439'],
                              [u'tablet', u'9887']]
        }
        self.processed = {}
        self.processed['results'] = {
            'completed': {
                'desktop': 14452,
                'mobile': 4073,
                'tablet': 4287
            },
            'not_completed': {
                'desktop': 30864,
                'mobile': 11439,
                'tablet': 9887
            }
        }

    def test_convert_data_to_dict(self):
        input = [[u'Amazon Silk', u'193'], [u'Android Browser', u'1361'],
                 [u'BlackBerry', u'116']]
        expected = {
            'Amazon Silk': 193,
            'Android Browser': 1361,
            'BlackBerry': 116
        }
        results = self.d.convert_data_to_dict(input)
        assert results == expected

    def test_get_unique_keys(self):
        '''Get the unique keys across both sets of results.'''
        del self.processed['results']['completed']['mobile']
        expected = [u'desktop', u'mobile', u'tablet']
        results = self.d.get_unique_keys(self.processed)
        assert results == expected

    def test_remove_missing_data(self):
        '''Remove data with missing or low values.'''
        self.processed['results']['completed']['mobile'] = 0
        self.processed['results']['not_completed']['wristwatch'] = 0
        expected = {
            'completed': {
                'tablet': 4287,
                'desktop': 14452
            },
            'not_completed': {
                'tablet': 9887,
                'desktop': 30864
            }
        }
        results = self.d.remove_missing_data(self.processed)
        assert results == expected