def __init__(self, input_dim, f1, f2, d1, num_classes):
    '''
    Parameters
    ----------
    input_dim : int
        The number of channels in the input, based on the training data
    f1 : int
        The number of filters in the first convolutional layer
    f2 : int
        The number of filters in the second convolutional layer
    d1 : int
        The number of neurons in the first dense (hidden) layer
    num_classes : int
        The number of classes predicted by the model
    '''
    init_kwargs = {'gain': np.sqrt(2)}
    self.conv1 = conv(input_dim, f1, 5, 5, weight_initializer=glorot_uniform, weight_kwargs=init_kwargs)
    self.conv2 = conv(f1, f2, 5, 5, weight_initializer=glorot_uniform, weight_kwargs=init_kwargs)
    self.dense1 = dense(f2 * 37 * 37, d1, weight_initializer=glorot_uniform, weight_kwargs=init_kwargs)
    self.dense2 = dense(d1, num_classes, weight_initializer=glorot_uniform, weight_kwargs=init_kwargs)
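# A minimal forward-pass sketch to accompany the layers defined above. This assumes
# MyNN-style callable layers and mygrad's relu activation; the flatten step matches
# the f2 * 37 * 37 input size expected by dense1. The method body is illustrative,
# not taken from the original code.
from mygrad.nnet.activations import relu

def __call__(self, x):
    ''' x : shape-(N, input_dim, H, W) batch of images '''
    x = relu(self.conv1(x))
    x = relu(self.conv2(x))
    x = x.reshape(x.shape[0], -1)  # flatten to shape-(N, f2 * 37 * 37)
    x = relu(self.dense1(x))
    return self.dense2(x)          # shape-(N, num_classes) classification scores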
def __init__(self):
    self.conv1 = conv(1, 5, 2, 2, stride=2, padding=0, weight_initializer=glorot_uniform)
    self.conv2 = conv(5, 10, 2, 2, stride=1, padding=0, weight_initializer=glorot_uniform)
    self.dense1 = dense(360, 300, weight_initializer=glorot_uniform)
    self.dense2 = dense(300, 300, weight_initializer=glorot_uniform)
    self.dense3 = dense(300, 5, weight_initializer=glorot_uniform)

    # gather every learnable parameter (weights and biases) from each layer
    self.layers = (self.conv1, self.conv2, self.dense1, self.dense2, self.dense3)
    self.tensors = []
    for layer in self.layers:
        for parameter in layer.parameters:
            self.tensors.append(parameter)

    # plain NumPy views of the parameter data
    self.weights = [parameter.data for parameter in self.tensors]
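# A minimal sketch of a gradient-descent update over the parameter tensors gathered
# above. It assumes a MyGrad-style autograd API in which loss.backward() has already
# populated each parameter's .grad attribute; the method name and learning rate are
# illustrative only.
def gradient_step(self, learning_rate=0.01):
    for parameter in self.tensors:
        if parameter.grad is not None:
            # plain SGD: step each parameter opposite its gradient
            parameter.data -= learning_rate * parameter.grad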
def __init__(self):
    params = np.load("params.npy")

    # This gain is a parameter for the weight-initialization function glorot_uniform,
    # which you can read more about in the documentation, but it isn't crucial for now.
    # If you would like to read more about the rationale behind these weight initializations,
    # see Xavier Glorot's paper with Yoshua Bengio:
    # http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf
    init_kwargs = {'gain': np.sqrt(2)}

    # We will use a dropout probability of 0.5, so that activations are randomly zeroed during training
    self.dropout_prob = 0.5

    # Initialize your two dense and two convolution layers as class attributes using the functions
    # imported from MyNN. We will use weight_initializer=glorot_uniform for all 4 layers.
    # You know the input size of your first convolution layer. Try varying the output size,
    # but make sure that the following layers' dimensions line up. A suggested starting point
    # for the first convolution layer is input = 1, output = 20, filter_dims = 5, padding = 0
    # (note: the implementation below uses stride = 1 rather than the stride = 5 suggested
    # in the original comment).
    self.dense1 = dense(180, 200, weight_initializer=glorot_uniform, weight_kwargs=init_kwargs)
    self.dense2 = dense(200, 10, weight_initializer=glorot_uniform, weight_kwargs=init_kwargs)
    self.conv1 = conv(1, 20, (5, 5), stride=1, padding=0, weight_initializer=glorot_uniform, weight_kwargs=init_kwargs)
    self.conv2 = conv(20, 20, (2, 2), stride=2, padding=0, weight_initializer=glorot_uniform, weight_kwargs=init_kwargs)
    self.dropout = dropout(self.dropout_prob)

    # load the pre-trained parameters into the layers: conv1 w/b, conv2 w/b, dense1 w/b, dense2 w/b
    self.conv1.weight = Tensor(params[0])
    self.conv1.bias = Tensor(params[1])
    self.conv2.weight = Tensor(params[2])
    self.conv2.bias = Tensor(params[3])
    self.dense1.weight = Tensor(params[4])
    self.dense1.bias = Tensor(params[5])
    self.dense2.weight = Tensor(params[6])
    self.dense2.bias = Tensor(params[7])
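# A sketch of how a compatible "params.npy" could be produced. The load order above
# implies an 8-element sequence of arrays: conv1 weight/bias, conv2 weight/bias,
# dense1 weight/bias, dense2 weight/bias. Arrays of different shapes must be stored
# as a pickled object array, so newer NumPy versions would need
# np.load("params.npy", allow_pickle=True) on the loading side. This helper is an
# assumption about how the file was created, not part of the original code.
def save_params(model, path="params.npy"):
    params = np.array(
        [model.conv1.weight.data, model.conv1.bias.data,
         model.conv2.weight.data, model.conv2.bias.data,
         model.dense1.weight.data, model.dense1.bias.data,
         model.dense2.weight.data, model.dense2.bias.data],
        dtype=object,
    )
    np.save(path, params)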
def __init__(self):
    # Check this: 3 is the number of input channels; (5, 5) is the kernel size
    gain = {'gain': np.sqrt(2)}
    self.conv1 = conv(3, 20, (5, 5), weight_initializer=glorot_uniform, weight_kwargs=gain)
    self.conv2 = conv(20, 10, (5, 5), weight_initializer=glorot_uniform, weight_kwargs=gain)
    # Check the dimensions on this:
    self.dense3 = dense(49000, 232, weight_initializer=glorot_uniform, weight_kwargs=gain)
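# The "check the dimensions" note above can be verified with a small helper: for a
# stride-1, padding-0 convolution the spatial output size is (in - kernel + 1).
# The 78 x 78 input below is a hypothetical example, not taken from the original code.
def conv_output_size(in_size, kernel, stride=1, padding=0):
    return (in_size + 2 * padding - kernel) // stride + 1

# e.g. for a shape-(3, 78, 78) input:
#   conv1 (5x5): 78 -> 74;  conv2 (5x5): 74 -> 70
#   flattened size = 10 * 70 * 70 = 49000, matching the input expected by dense3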
def __init__(self):
    self.conv1 = conv(1, 10, 5, padding=0, weight_initializer=glorot_uniform)
    # conv2's input-channel count must match conv1's output-channel count (10, not 5)
    self.conv2 = conv(10, 20, 5, padding=0, weight_initializer=glorot_uniform)
    self.dense1 = dense(290, 20, weight_initializer=glorot_uniform)
    self.dense2 = dense(20, 2, weight_initializer=glorot_uniform)
def __init__(self): """ Initializes model layers and weights. """ # <COGINST> init_kwargs = {'gain': np.sqrt(2)} self.conv1 = conv(200, 250, 2, stride = 1, weight_initializer = glorot_normal, weight_kwargs = init_kwargs) self.dense1 = dense(250, 250, weight_initializer = glorot_normal, weight_kwargs = init_kwargs) self.dense2 = dense(250,1, weight_initializer = glorot_normal, weight_kwargs = init_kwargs)
def __init__(self):
    self.conv1 = conv(1, 50, 3, 3, stride=1, weight_initializer=glorot_uniform)
    self.conv2 = conv(50, 20, 3, 3, stride=1, weight_initializer=glorot_uniform)
    self.dense1 = dense(180, 50, weight_initializer=glorot_uniform)
    self.dense2 = dense(50, 29, weight_initializer=glorot_uniform)
def __init__(self):
    # `gain` was referenced but never defined; define it here for the glorot_uniform initializer
    gain = {'gain': np.sqrt(2)}
    self.conv1 = conv(1, 50, (5, 5), stride=5, weight_initializer=glorot_uniform, weight_kwargs=gain)
    self.conv2 = conv(50, 20, (2, 2), stride=2, weight_initializer=glorot_uniform, weight_kwargs=gain)
    self.dense1 = dense(500, 50, weight_initializer=glorot_uniform, weight_kwargs=gain)
    self.dense2 = dense(50, 29, weight_initializer=glorot_uniform, weight_kwargs=gain)
def __init__(self, dim_in=48, num_out=7, load=False):
    self.conv1 = conv(1, 32, 2, 2, stride=1, padding=0, weight_initializer=Model.init)
    self.conv2 = conv(32, 64, 3, 3, stride=2, padding=1, weight_initializer=Model.init)
    self.conv3 = conv(64, 128, 2, 2, stride=2, weight_initializer=Model.init)
    self.conv4 = conv(128, 256, 3, 3, stride=3, weight_initializer=Model.init)
    self.dense1 = dense(256, 512, weight_initializer=Model.init)
    self.dense2 = dense(512, num_out, weight_initializer=Model.init)

    if load:
        # restore previously-saved parameters from the checkpoint file
        data = np.load("data/npmodelParam.npz")
        self.conv1.weight = data["l1w"]
        self.conv1.bias = data["l1b"]
        self.conv2.weight = data["l2w"]
        self.conv2.bias = data["l2b"]
        self.conv3.weight = data["l3w"]
        self.conv3.bias = data["l3b"]
        self.conv4.weight = data["l4w"]
        self.conv4.bias = data["l4b"]
        self.dense1.weight = data["l5w"]
        self.dense1.bias = data["l5b"]
        self.dense2.weight = data["l6w"]
        self.dense2.bias = data["l6b"]
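# A sketch of how the checkpoint loaded above could be written out, mirroring the same
# key names. It assumes each layer's weight and bias expose a .data ndarray (as used
# elsewhere in this file); the method itself is not part of the original code.
def save(self, path="data/npmodelParam.npz"):
    np.savez(
        path,
        l1w=self.conv1.weight.data, l1b=self.conv1.bias.data,
        l2w=self.conv2.weight.data, l2b=self.conv2.bias.data,
        l3w=self.conv3.weight.data, l3b=self.conv3.bias.data,
        l4w=self.conv4.weight.data, l4b=self.conv4.bias.data,
        l5w=self.dense1.weight.data, l5b=self.dense1.bias.data,
        l6w=self.dense2.weight.data, l6b=self.dense2.bias.data,
    )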
def policyForward(self, data):
    data = mg.Tensor(data)
    # note: this conv layer is constructed but never used in the forward pass below
    conv1 = conv(1, 20, 5, 5, stride=1, padding=0, weight_initializer=glorot_uniform)
    for i in range(len(self.weights) - 1):
        data = mg.matmul(data, self.weights[i])  # hidden layers
        data[data < 0] = 0                       # ReLU
    return mg.matmul(data, self.weights[-1])     # outNeurons
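# A sketch of the weight list that policyForward expects: a sequence of 2-D arrays
# whose inner dimensions chain together, ending in a final layer with one column per
# output neuron. The layer sizes and scaled random initialization below are
# assumptions for illustration only.
import numpy as np

def init_policy_weights(in_dim, hidden_dims, out_neurons):
    sizes = [in_dim] + list(hidden_dims) + [out_neurons]
    return [np.random.randn(a, b) * np.sqrt(2 / a)  # He-style scaling (assumption)
            for a, b in zip(sizes[:-1], sizes[1:])]

# e.g. self.weights = init_policy_weights(in_dim=400, hidden_dims=(200,), out_neurons=5)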