def __init__(self):
    """Build a two-layer regression net: 1 input -> 50 hidden -> 1 output."""
    hidden = 50
    self.w1 = nn.Parameter(1, hidden)   # input -> hidden weights
    self.b1 = nn.Parameter(1, hidden)   # hidden-layer bias
    self.w2 = nn.Parameter(hidden, 1)   # hidden -> output weights
    self.b2 = nn.Parameter(1, 1)        # output bias
    # Negative rate: presumably the update step adds rate * gradient,
    # so a negative multiplier performs descent -- confirm against train().
    self.learningRate = -0.05
    self.maxLoss = 0.02  # loss threshold, presumably the stopping criterion for training
def __init__(self):
    """Initialize parameters for a 1 -> 100 -> 1 two-layer network.

    One scalar input x, a single hidden layer of 100 neurons, and one
    scalar output.
    """
    # Renamed from `input`/`output` to avoid shadowing the Python builtins.
    in_size = 1
    neurons = 100
    out_size = 1
    self.w1 = nn.Parameter(in_size, neurons)
    self.w2 = nn.Parameter(neurons, out_size)
    self.b1 = nn.Parameter(1, neurons)
    self.b2 = nn.Parameter(1, out_size)
def __init__(self):
    """Two-layer regression model: 1 -> 30 hidden -> 1, trained sample-by-sample."""
    self.batch_size = 1
    self.hidden_layer_size = 30
    # Negative value: presumably the update rule adds rate * gradient -- verify.
    self.learning_rate = -0.004
    h = self.hidden_layer_size
    self.weights1 = nn.Parameter(1, h)
    self.bias1 = nn.Parameter(1, h)
    self.weightsout = nn.Parameter(h, 1)
    self.biasout = nn.Parameter(1, 1)
def __init__(self):
    """Set up a 1 -> 40 -> 1 network trained one sample at a time."""
    width = 40
    self.w1 = nn.Parameter(1, width)
    self.b1 = nn.Parameter(1, width)
    self.w2 = nn.Parameter(width, 1)
    self.b2 = nn.Parameter(1, 1)
    self.multiplier = -0.005  # step multiplier for parameter updates
    self.batch_size = 1
def __init__(self):
    """One hidden layer of 400 units mapping a scalar input to a scalar output."""
    self.Hidden_layer_sizes = 400
    # Negative rate: presumably descent is implemented via an additive update.
    self.learning_rate = -0.005
    size = self.Hidden_layer_sizes
    self.weight1 = nn.Parameter(1, size)
    self.bias1 = nn.Parameter(1, size)
    self.weight2 = nn.Parameter(size, 1)
    self.bias2 = nn.Parameter(1, 1)
def __init__(self, widths, lossFunction, batchSize, learningRate, targetAccuracy):
    """Build a fully-connected stack from consecutive layer widths.

    widths: sequence of layer sizes; layer i maps widths[i] -> widths[i+1].
    lossFunction / batchSize / learningRate / targetAccuracy are stored
    unchanged for use by the training loop.
    """
    shapes = list(zip(widths, widths[1:]))
    # All weights are created first, then all biases, one per layer.
    self.weight = [nn.Parameter(rows, cols) for rows, cols in shapes]
    self.bias = [nn.Parameter(1, cols) for _, cols in shapes]
    self.lossFunction = lossFunction
    self.batchSize = batchSize
    self.learningRate = learningRate
    self.targetAccuracy = targetAccuracy
def __init__(self):
    """Digit classifier: 784-pixel input -> 300 hidden -> 10 class scores."""
    n_hidden = 300
    self.w1 = nn.Parameter(784, n_hidden)
    self.b1 = nn.Parameter(1, n_hidden)
    self.w2 = nn.Parameter(n_hidden, 10)
    self.b2 = nn.Parameter(1, 10)
    self.multiplier = -0.15  # update step multiplier (negative, presumably for descent)
    self.batch_size = 100
def __init__(self):
    """Scalar regression net with one 100-unit hidden layer."""
    self.batch_size = 1
    self.learning_rate = -0.01  # negative multiplier -- presumably an additive update rule
    self.hidden_layer_size = 100
    h = self.hidden_layer_size
    self.weight1 = nn.Parameter(1, h)
    self.weight2 = nn.Parameter(h, 1)
    self.bias1 = nn.Parameter(1, h)
    self.bias2 = nn.Parameter(1, 1)
def __init__(self):
    """Create weights and biases for a 1 -> 100 -> 1 network.

    The hidden width (100) was chosen large enough for the model to
    approximate sin(x) over the target interval.
    """
    width = 100
    self.weight1 = nn.Parameter(1, width)
    self.bias1 = nn.Parameter(1, width)
    self.weight2 = nn.Parameter(width, 1)
    self.bias2 = nn.Parameter(1, 1)
def __init__(self):
    """784-input digit classifier with one 300-unit hidden layer."""
    h1 = 300
    self.w1 = nn.Parameter(784, h1)  # pixels -> hidden
    self.w2 = nn.Parameter(h1, 10)   # hidden -> 10 class scores
    self.b1 = nn.Parameter(1, h1)
    self.b2 = nn.Parameter(1, 10)
def __init__(self):
    """Hidden layer of 300 units over a scalar input; batched updates."""
    self.multiplier = -0.05   # update step multiplier
    self.batch_size = 100
    self.hidden_size = 300
    h = self.hidden_size
    self.W1 = nn.Parameter(1, h)  # a single input feature, so shape (1, h)
    self.b1 = nn.Parameter(1, h)
    self.W2 = nn.Parameter(h, 1)
    self.b2 = nn.Parameter(1, 1)
def __init__(self):
    """1 -> 90 -> 1 network; all trainables collected in self.parameters."""
    self.alpha = -0.002  # step multiplier used by the update rule
    self.weight_1 = nn.Parameter(1, 90)
    self.weight_2 = nn.Parameter(90, 1)
    self.bias_1 = nn.Parameter(1, 90)
    self.bias_2 = nn.Parameter(1, 1)
    # Kept in one list so a training loop can iterate over every parameter.
    self.parameters = [self.weight_1, self.weight_2, self.bias_1, self.bias_2]
def __init__(self):
    """Single-hidden-layer (100 unit) model for scalar regression."""
    # NOTE(review): rate is positive while sibling models use negative
    # multipliers -- verify this matches the update rule's sign convention.
    self.learning_rate = 0.1
    self.hidden_size = 100
    h = self.hidden_size
    self.weights = nn.Parameter(1, h)
    self.bias = nn.Parameter(1, h)
    self.weights2 = nn.Parameter(h, 1)
    self.bias2 = nn.Parameter(1, 1)
def __init__(self):
    """250-unit hidden layer, trained in batches of 200 samples."""
    self.hidden_lsize = 250
    self.learning_rate = -0.01  # negative multiplier -- presumably additive update step
    self.batch_size = 200
    h = self.hidden_lsize
    self.m_1 = nn.Parameter(1, h)
    self.b_1 = nn.Parameter(1, h)
    self.m_2 = nn.Parameter(h, 1)
    self.b_2 = nn.Parameter(1, 1)
def __init__(self):
    """50-unit hidden layer, batch size 10, scalar in/out."""
    self.learningRate = -0.01
    self.hiddenLayerSize = 50
    self.batch_sizes = 10
    n = self.hiddenLayerSize
    self.W1 = nn.Parameter(1, n)
    self.b1 = nn.Parameter(1, n)
    self.W2 = nn.Parameter(n, 1)
    self.b2 = nn.Parameter(1, 1)
def __init__(self):
    """Two-layer net: 1 -> 100 -> 1.

    NOTE(review): the template comment asks for self.learning_rate, but
    this model stores self.multiplier -- confirm the trainer reads it.
    """
    width = 100
    self.l1b = nn.Parameter(1, width)   # layer-1 bias
    self.l1 = nn.Parameter(1, width)    # layer-1 weights
    self.two = nn.Parameter(width, 1)   # layer-2 weights
    self.l2b = nn.Parameter(1, 1)       # layer-2 bias
    self.multiplier = 0.01
def __init__(self):
    """Two-layer network with a wide (1000-unit) hidden layer."""
    wide = 1000
    self.m1 = nn.Parameter(1, wide)     # hidden-layer weights
    self.bias1 = nn.Parameter(1, wide)  # bias matches the hidden width
    self.m2 = nn.Parameter(wide, 1)     # hidden -> output weights
    self.bias2 = nn.Parameter(1, 1)     # output bias, 1x1
def __init__(self):
    """Three-layer net: 1 -> 120 -> 80 -> 1.

    NOTE(review): no bias parameter is created for the final layer
    (w3 has no matching b3) -- confirm that is intentional.
    """
    dims = (1, 120, 80, 1)
    self.w1 = nn.Parameter(dims[0], dims[1])
    self.b1 = nn.Parameter(1, dims[1])
    self.w2 = nn.Parameter(dims[1], dims[2])
    self.b2 = nn.Parameter(1, dims[2])
    self.w3 = nn.Parameter(dims[2], dims[3])
def __init__(self):
    """Parameters stored as a flat list [w1, b1, w2, b2] for a 1 -> 50 -> 1 net."""
    self.learning_rate = -0.05
    self.batch_size = 200
    # Order matters: weight then bias for each of the two layers.
    self.layers = [
        nn.Parameter(1, 50),
        nn.Parameter(1, 50),
        nn.Parameter(50, 1),
        nn.Parameter(1, 1),
    ]
    self.numLayers = len(self.layers)
def __init__(self):
    """784 -> 150 -> 10 classifier trained in batches of 100."""
    self.W1 = nn.Parameter(784, 150)
    self.b1 = nn.Parameter(1, 150)
    self.W2 = nn.Parameter(150, 10)
    self.b2 = nn.Parameter(1, 10)
    # NOTE(review): set to 3 although only two weight layers exist -- verify usage.
    self.layer_num = 3
    self.batch_size = 100
    self.multiplier = -0.35  # update step multiplier
def __init__(self):
    """1 -> 100 -> 1 regression model.

    Project constraints: hidden sizes 10-400; batch size between 1 and the
    dataset size (and evenly dividing it for Q2/Q3); learning rate between
    0.001 and 1.0; one to three hidden layers.
    """
    self.batch_size = 1
    self.learning_rate = 0.005
    self.m0 = nn.Parameter(1, 100)  # hidden weights
    self.b0 = nn.Parameter(1, 100)  # hidden bias
    self.m1 = nn.Parameter(100, 1)  # output weights
    self.b1 = nn.Parameter(1, 1)    # output bias
def __init__(self):
    """Language-identification RNN over a 47-character combined alphabet.

    The dataset covers five languages; self.num_chars and
    len(self.languages) are available for sizing layers.
    """
    self.num_chars = 47
    self.languages = ["English", "Spanish", "Finnish", "Dutch", "Polish"]
    self.batchSize = 500
    self.learning_rate = -0.2  # negative multiplier -- presumably additive update
    # Recurrent stage: char input projection plus hidden-to-hidden transition.
    self.hiddenSize = 400
    self.W = nn.Parameter(self.num_chars, self.hiddenSize)
    self.W_hidden = nn.Parameter(self.hiddenSize, self.hiddenSize)
    # Output head: two 200-unit layers, then 5 language scores.
    self.hL1 = 200
    self.hL2 = 200
    self.weights = [
        nn.Parameter(self.hiddenSize, self.hL1),
        nn.Parameter(self.hL1, self.hL2),
        nn.Parameter(self.hL2, 5),
    ]
    self.bias = [
        nn.Parameter(1, self.hL1),
        nn.Parameter(1, self.hL2),
        nn.Parameter(1, 5),
    ]
def initialize_w_and_b(self):
    """Populate self.weights / self.biases for a 1 -> h -> ... -> 1 stack.

    Uses self.hidden_layer_size (h) and self.number_layers; a one-layer
    model degenerates to a single 1x1 weight and bias.
    """
    h = self.hidden_layer_size
    n_layers = self.number_layers
    weights, biases = [], []
    if n_layers == 1:
        weights.append(nn.Parameter(1, 1))
        biases.append(nn.Parameter(1, 1))
    else:
        # Input layer: scalar -> h.
        weights.append(nn.Parameter(1, h))
        biases.append(nn.Parameter(1, h))
        # Middle layers: h -> h.
        for _ in range(n_layers - 2):
            weights.append(nn.Parameter(h, h))
            biases.append(nn.Parameter(1, h))
        # Output layer: h -> scalar.
        weights.append(nn.Parameter(h, 1))
        biases.append(nn.Parameter(1, 1))
    self.weights = weights
    self.biases = biases
def __init__(self):
    """784 -> 200 -> 200 -> 10 classifier; layers stored flat as [w, b, w, b, ...]."""
    sizes = [784, 200, 200, 10]
    self.batch_size = 100
    self.step_size = 0.2
    # NOTE(review): named "loss" but 0.98 looks like an accuracy target -- verify.
    self.acceptable_loss = 0.98
    self.layers = []
    for prev, cur in zip(sizes, sizes[1:]):
        self.layers.append(nn.Parameter(prev, cur))  # weight matrix
        self.layers.append(nn.Parameter(1, cur))     # bias row
def __init__(self):
    """RNN language classifier: 47 character inputs, 5 language scores out."""
    self.num_chars = 47
    self.languages = ["English", "Spanish", "Finnish", "Dutch", "Polish"]
    size = 200  # hidden-state width
    self.W = nn.Parameter(47, size)           # char -> hidden
    self.W_hidden = nn.Parameter(size, size)  # hidden -> hidden transition
    self.b = nn.Parameter(1, size)
    self.W_last = nn.Parameter(size, 5)       # hidden -> language scores
    self.b_last = nn.Parameter(1, 5)
    self.multiplier = -0.15  # update step multiplier
    self.batch_size = 100
def __init__(self):
    """Scalar regression with a 250-unit hidden layer, batch size 4."""
    n_in, n_hidden, n_out = 1, 250, 1
    self.learning_rate = 0.005
    self.batch_size = 4
    self.w1 = nn.Parameter(n_in, n_hidden)
    self.w2 = nn.Parameter(n_hidden, n_out)
    self.b1 = nn.Parameter(1, n_hidden)
    self.b2 = nn.Parameter(1, n_out)
def __init__(self):
    """One 50-neuron hidden layer over a scalar input, batches of 25."""
    self.batch_size = 25
    self.num_neurons_hidden_layer = 50
    n = self.num_neurons_hidden_layer
    # Input batches are (batch_size x 1), so the first weight is (1 x n).
    self.w_1 = nn.Parameter(1, n)
    self.b_1 = nn.Parameter(1, n)
    # Output layer collapses the hidden activations to a single value.
    self.output_w = nn.Parameter(n, 1)
    self.output_b = nn.Parameter(1, 1)
def __init__(self):
    """Language-ID RNN: 47-character alphabet, five output languages.

    The input stage is two layers (47 -> 200 -> 150), the recurrent
    transition is 150 -> 150, and the head maps 150 -> 5 scores.
    """
    self.num_chars = 47
    self.languages = ["English", "Spanish", "Finnish", "Dutch", "Polish"]
    self.batch_size = 50
    # NOTE(review): set but not used in this initializer -- verify it is read elsewhere.
    self.hidden_layer_size = 25
    self.d = 150  # recurrent state width
    # Two-layer input encoder.
    self.w_init = [nn.Parameter(self.num_chars, 200), nn.Parameter(200, self.d)]
    self.w_hidden = nn.Parameter(self.d, self.d)
    self.b_init = [nn.Parameter(1, 200), nn.Parameter(1, self.d)]
    # Output head.
    self.w_final = nn.Parameter(self.d, 5)
    self.b = nn.Parameter(1, self.d)
    self.b_final = nn.Parameter(1, 5)
def __init__(self):
    """50-unit hidden layer, batch size 100, scalar regression."""
    self.batch_size = 100
    self.hidden_size = 50
    # NOTE(review): positive rate while sibling models use negative
    # multipliers -- verify against the update rule's sign convention.
    self.learning_rate = 0.01
    n = self.hidden_size
    self.w1 = nn.Parameter(1, n)
    self.w2 = nn.Parameter(n, 1)
    self.b1 = nn.Parameter(1, n)
    self.b2 = nn.Parameter(1, 1)
def __init__(self):
    """Language-ID model: 47 character inputs -> 100 hidden -> 5 language scores."""
    self.num_chars = 47
    self.languages = ["English", "Spanish", "Finnish", "Dutch", "Polish"]
    n_hidden = 100
    self.w1 = nn.Parameter(self.num_chars, n_hidden)
    self.w2 = nn.Parameter(n_hidden, len(self.languages))
    self.wh = nn.Parameter(n_hidden, n_hidden)  # hidden -> hidden transition