def run(self, x):
    """
    Runs the model for a batch of examples.

    Your model should predict a node with shape (batch_size x 10),
    containing scores. Higher scores correspond to greater probability of
    the image belonging to a particular class.

    Inputs:
        x: a node with shape (batch_size x 784)
    Output:
        A node with shape (batch_size x 10) containing predicted scores
        (also called logits)
    """
    "*** YOUR CODE HERE ***"
    xa = nn.Linear(x, self.a)
    xab = nn.AddBias(xa, self.b)
    xabR = nn.ReLU(xab)
    xabRc = nn.Linear(xabR, self.c)
    xabRcd = nn.AddBias(xabRc, self.d)
    return xabRcd
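# Hedged companion sketch (not part of the snippet above): run() presupposes
# parameters created in __init__. Assuming the CS188-style nn API
# (nn.Parameter, nn.SoftmaxLoss), the shapes implied by run() would be as
# follows; the hidden size of 256 is an illustrative choice.
def __init__(self):
    self.a = nn.Parameter(784, 256)  # first-layer weights
    self.b = nn.Parameter(1, 256)    # first-layer bias
    self.c = nn.Parameter(256, 10)   # output-layer weights
    self.d = nn.Parameter(1, 10)     # output-layer bias

def get_loss(self, x, y):
    # softmax cross-entropy over the 10 digit classes
    return nn.SoftmaxLoss(self.run(x), y)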
def run(self, x):
    """
    Runs the model for a batch of examples.

    Your model should predict a node with shape (batch_size x 10),
    containing scores. Higher scores correspond to greater probability of
    the image belonging to a particular class.

    Inputs:
        x: a node with shape (batch_size x 784)
    Output:
        A node with shape (batch_size x 10) containing predicted scores
        (also called logits)
    """
    "*** YOUR CODE HERE ***"
    Z1 = nn.AddBias(nn.Linear(x, self.w1), self.b1)
    A1 = nn.ReLU(Z1)
    Z2 = nn.AddBias(nn.Linear(A1, self.w2), self.b2)
    A2 = nn.ReLU(Z2)
    Z3 = nn.AddBias(nn.Linear(A2, self.w3), self.b3)
    return Z3
def run(self, x):
    """
    Runs the model for a batch of examples.

    Your model should predict a node with shape (batch_size x 10),
    containing scores. Higher scores correspond to greater probability of
    the image belonging to a particular class.

    Inputs:
        x: a node with shape (batch_size x 784)
    Output:
        A node with shape (batch_size x 10) containing predicted scores
        (also called logits)
    """
    "*** YOUR CODE HERE ***"
    layer0in = nn.Linear(x, self.m1)
    layer0mid = nn.AddBias(layer0in, self.b1)
    layer0out = nn.ReLU(layer0mid)
    layer1in = nn.Linear(layer0out, self.m2)
    layer1mid = nn.AddBias(layer1in, self.b2)
    return layer1mid
def run(self, x):
    """
    Runs the model for a batch of examples.

    Inputs:
        x: a node with shape (batch_size x 1)
    Returns:
        A node with shape (batch_size x 1) containing predicted y-values
    """
    "*** YOUR CODE HERE ***"
    affine = nn.Linear(x, self.w1)
    bias1 = nn.AddBias(affine, self.b1)
    relued = nn.ReLU(bias1)
    affine2 = nn.Linear(relued, self.w2)
    bias2 = nn.AddBias(affine2, self.b2)
    relued2 = nn.ReLU(bias2)
    affine3 = nn.Linear(relued2, self.w3)
    bias3 = nn.AddBias(affine3, self.b3)
    relued3 = nn.ReLU(bias3)
    out = nn.Linear(relued3, self.w4)
    return out
def run(self, x):
    """
    Runs the model for a batch of examples.

    Your model should predict a node with shape (batch_size x 10),
    containing scores. Higher scores correspond to greater probability of
    the image belonging to a particular class.

    Inputs:
        x: a node with shape (batch_size x 784)
    Output:
        A node with shape (batch_size x 10) containing predicted scores
        (also called logits)
    """
    "*** YOUR CODE HERE ***"
    layer1 = nn.ReLU(nn.AddBias(nn.Linear(x, self.weights[0]), self.bias[0]))
    layer2 = nn.ReLU(nn.AddBias(nn.Linear(layer1, self.weights[1]), self.bias[1]))
    layer3 = nn.AddBias(nn.Linear(layer2, self.weights[2]), self.bias[2])
    return layer3
def run(self, x):
    """
    Runs the model for a batch of examples.

    Inputs:
        x: a node with shape (batch_size x 1)
    Returns:
        A node with shape (batch_size x 1) containing predicted y-values
    """
    "*** YOUR CODE HERE ***"
    # f(x) = relu(x * m1 + b1) * m2 + b2
    xm1 = nn.Linear(x, self.m1)  # x = feature input, m1 = weights
    xm1b1 = nn.AddBias(xm1, self.b1)
    reluxm1b1 = nn.ReLU(xm1b1)
    relum2 = nn.Linear(reluxm1b1, self.m2)
    predicted_y = nn.AddBias(relum2, self.b2)
    return predicted_y
def run(self, x):
    """
    Runs the model for a batch of examples.

    Your model should predict a node with shape (batch_size x 10),
    containing scores. Higher scores correspond to greater probability of
    the image belonging to a particular class.

    Inputs:
        x: a node with shape (batch_size x 784)
    Output:
        A node with shape (batch_size x 10) containing predicted scores
        (also called logits)
    """
    xw_1 = nn.Linear(x, self.w1)
    r_1 = nn.ReLU(nn.AddBias(xw_1, self.b1))
    xw_2 = nn.Linear(r_1, self.w2)
    # ReLU restored on the second layer (as the name r_2 implies); without
    # it, the last two linear layers collapse into a single linear map
    r_2 = nn.ReLU(nn.AddBias(xw_2, self.b2))
    xw_3 = nn.Linear(r_2, self.w3)
    r_3 = nn.AddBias(xw_3, self.b3)
    return r_3
def run(self, x):
    """
    Runs the model for a batch of examples.

    Your model should predict a node with shape (batch_size x 10),
    containing scores. Higher scores correspond to greater probability of
    the image belonging to a particular class.

    Inputs:
        x: a node with shape (batch_size x 784)
    Output:
        A node with shape (batch_size x 10) containing predicted scores
        (also called logits)
    """
    "*** YOUR CODE HERE ***"
    l1 = nn.Linear(x, self.w0)
    l1b = nn.AddBias(l1, self.b0)
    r1 = nn.ReLU(l1b)
    l2 = nn.Linear(r1, self.w1)
    l2b = nn.AddBias(l2, self.b1)
    return l2b
def run(self, x):
    """
    Runs the model for a batch of examples.

    Your model should predict a node with shape (batch_size x 10),
    containing scores. Higher scores correspond to greater probability of
    the image belonging to a particular class.

    Inputs:
        x: a node with shape (batch_size x 784)
    Output:
        A node with shape (batch_size x 10) containing predicted scores
        (also called logits)
    """
    "*** YOUR CODE HERE ***"
    xm1 = nn.Linear(x, self.m1)  # x = feature input, m1 = weights
    xm1b1 = nn.AddBias(xm1, self.b1)
    reluxm1b1 = nn.ReLU(xm1b1)
    relum2 = nn.Linear(reluxm1b1, self.m2)
    predicted_y = nn.AddBias(relum2, self.b2)
    return predicted_y  # (batch_size x 10)
def run(self, x):
    """
    Runs the model for a batch of examples.

    Your model should predict a node with shape (batch_size x 10),
    containing scores. Higher scores correspond to greater probability of
    the image belonging to a particular class.

    Inputs:
        x: a node with shape (batch_size x 784)
    Output:
        A node with shape (batch_size x 10) containing predicted scores
        (also called logits)
    """
    "*** YOUR CODE HERE ***"
    features1 = nn.Linear(x, self.weight1)
    n = nn.AddBias(features1, self.bias1)
    b1 = nn.ReLU(n)
    features2 = nn.Linear(b1, self.weight2)
    b2 = nn.AddBias(features2, self.bias2)
    return b2
def run(self, x):
    """
    Runs the model for a batch of examples.

    Inputs:
        x: a node with shape (batch_size x 1)
    Returns:
        A node with shape (batch_size x 1) containing predicted y-values
    """
    "*** YOUR CODE HERE ***"
    wTx = nn.Linear(x, self.w)
    return nn.AddBias(wTx, self.bias)
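# Hedged sketch of how a regressor like the one above is typically trained
# with the same nn API. Assumes nn.SquareLoss, nn.gradients,
# Parameter.update, nn.as_scalar, and a dataset.iterate_forever(batch_size)
# iterator, as in the CS188 nn module; the learning rate, batch size, and
# stopping threshold are illustrative.
def train(self, dataset):
    learning_rate = 0.01
    for x, y in dataset.iterate_forever(batch_size=20):
        loss = nn.SquareLoss(self.run(x), y)
        grad_w, grad_b = nn.gradients(loss, [self.w, self.bias])
        self.w.update(grad_w, -learning_rate)     # gradient descent step
        self.bias.update(grad_b, -learning_rate)
        if nn.as_scalar(loss) < 0.0001:           # stop once the fit is good enough
            break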
def run(self, x):
    """
    Runs the model for a batch of examples.

    Inputs:
        x: a node with shape (batch_size x 1)
    Returns:
        A node with shape (batch_size x 1) containing predicted y-values
    """
    "*** YOUR CODE HERE ***"
    first_coefficient = nn.Linear(x, self.weight1)
    first_predict = nn.AddBias(first_coefficient, self.bia1)
    first_layer = nn.ReLU(first_predict)
    second_coefficient = nn.Linear(first_layer, self.weight2)
    second_predict = nn.AddBias(second_coefficient, self.bia2)
    second_layer = nn.ReLU(second_predict)
    third_coefficient = nn.Linear(second_layer, self.weight3)
    third_predict = nn.AddBias(third_coefficient, self.bia3)
    third_layer = nn.ReLU(third_predict)
    output = nn.AddBias(nn.Linear(third_layer, self.weight4), self.bia4)
    return output
def run(self, x):
    """
    Runs the model for a batch of examples.

    Your model should predict a node with shape (batch_size x 10),
    containing scores. Higher scores correspond to greater probability of
    the image belonging to a particular class.

    Inputs:
        x: a node with shape (batch_size x 784)
    Output:
        A node with shape (batch_size x 10) containing predicted scores
        (also called logits)
    """
    first = nn.Linear(x, self.w1)
    second = nn.AddBias(first, self.b1)
    third = nn.Linear(nn.ReLU(second), self.w2)
    fourth = nn.AddBias(third, self.b2)
    fifth = nn.Linear(nn.ReLU(fourth), self.w3)
    sixth = nn.AddBias(fifth, self.b3)
    return sixth
def run(self, x):
    """
    Runs the model for a batch of examples.

    Inputs:
        x: a node with shape (batch_size x 1)
    Returns:
        A node with shape (batch_size x 1) containing predicted y-values
    """
    "*** YOUR CODE HERE ***"
    # Based on the linear regression example given in the question:
    # compute the linear transform and the model's prediction for y
    xm1 = nn.Linear(x, self.weight1)
    predicted_y1 = nn.AddBias(xm1, self.bias1)
    # for non-linearity
    fx1 = nn.ReLU(predicted_y1)
    # this model uses only a two-layer-deep network
    xm2 = nn.Linear(fx1, self.weight2)
    fx_net = nn.AddBias(xm2, self.bias2)
    return fx_net
def main():
    X, y = gen_data(100)
    plot_data(X, y)
    model = nn.Linear(2, 1)
    for _ in range(20):
        scores = model(X)  # raw scores, not probabilities: threshold at 0
        preds = np.where(scores >= 0, 1, 0)
        model.backward(preds - y.reshape(-1, 1))  # perceptron-style error signal
        model.w -= 1e-2 * model.w.grad
    plot_clf(model, X, y)
    print(f'acc: {np.sum(preds == y.reshape(-1, 1)) / len(y):.2f}')
def run(self, x):
    """
    Runs the model for a batch of examples.

    Inputs:
        x: a node with shape (batch_size x 1)
    Returns:
        A node with shape (batch_size x 1) containing predicted y-values
    """
    "*** YOUR CODE HERE ***"
    # f(x) = relu(x . m1 + bias1) . m2 + bias2
    # layer 1
    layer1 = nn.Linear(x, self.m1)
    layer1WithBias = nn.AddBias(layer1, self.bias1)
    layer1Rectified = nn.ReLU(layer1WithBias)
    # layer 2: kept linear. Rectifying the output doesn't work here, since
    # a final ReLU would clip all negative y-values to zero.
    layer2 = nn.Linear(layer1Rectified, self.m2)
    layer2WithBias = nn.AddBias(layer2, self.bias2)
    return layer2WithBias
def run(self, x):
    """
    Runs the model for a batch of examples.

    Your model should predict a node with shape (batch_size x 10),
    containing scores. Higher scores correspond to greater probability of
    the image belonging to a particular class.

    Inputs:
        x: a node with shape (batch_size x 784)
    Output:
        A node with shape (batch_size x 10) containing predicted scores
        (also called logits)
    """
    "*** YOUR CODE HERE ***"
    layerz = nn.AddBias(nn.Linear(x, self.w1), self.b1)
    for i in range(len(self.layer)):
        layerz = nn.AddBias(nn.Linear(nn.ReLU(layerz), self.layer[i]),
                            self.bias[i])
    return nn.AddBias(nn.Linear(nn.ReLU(layerz), self.w2), self.b2)
def run(self, x):
    """
    Runs the model for a batch of examples.

    Inputs:
        x: a node with shape (batch_size x 1)
    Returns:
        A node with shape (batch_size x 1) containing predicted y-values
    """
    "*** YOUR CODE HERE ***"
    xm1 = nn.Linear(x, self.m1)
    xm1_add_b1 = nn.AddBias(xm1, self.b1)
    relu = nn.ReLU(xm1_add_b1)
    xm2 = nn.Linear(relu, self.m2)
    xm2_add_b2 = nn.AddBias(xm2, self.b2)
    relu2 = nn.ReLU(xm2_add_b2)
    xm3 = nn.Linear(relu2, self.m3)
    y_predict = nn.AddBias(xm3, self.b3)
    return y_predict
def run(self, xs):
    """
    Runs the model for a batch of examples.

    Although words have different lengths, our data processing guarantees
    that within a single batch, all words will be of the same length (L).

    Here `xs` will be a list of length L. Each element of `xs` will be a
    node with shape (batch_size x self.num_chars), where every row in the
    array is a one-hot vector encoding of a character. For example, if we
    have a batch of 8 three-letter words where the last word is "cat", then
    xs[1] will be a node that contains a 1 at position (7, 0). Here the
    index 7 reflects the fact that "cat" is the last word in the batch, and
    the index 0 reflects the fact that the letter "a" is the initial (0th)
    letter of our combined alphabet for this task.

    Your model should use a Recurrent Neural Network to summarize the list
    `xs` into a single node of shape (batch_size x hidden_size), for your
    choice of hidden_size. It should then calculate a node of shape
    (batch_size x 5) containing scores, where higher scores correspond to
    greater probability of the word originating from a particular language.

    Inputs:
        xs: a list with L elements (one per character), where each element
            is a node with shape (batch_size x self.num_chars)
    Returns:
        A node with shape (batch_size x 5) containing predicted scores
        (also called logits)
    """
    # initial hidden state from the first character
    z = nn.Linear(xs[0], self.weights)
    h = nn.AddBias(z, self.function_bias)
    h = nn.AddBias(nn.Linear(nn.ReLU(h), self.layer2), self.bias2)
    # recurrent update over the remaining characters
    for x in xs[1:]:
        z = nn.Add(nn.Linear(x, self.weights),
                   nn.Linear(h, self.hidden_leaf_village))
        h = nn.AddBias(nn.ReLU(z), self.function_bias)
    return nn.Linear(h, self.result_weight)
def run(self, xs):
    """
    Runs the model for a batch of examples.

    Although words have different lengths, our data processing guarantees
    that within a single batch, all words will be of the same length (L).

    Here `xs` will be a list of length L. Each element of `xs` will be a
    node with shape (batch_size x self.num_chars), where every row in the
    array is a one-hot vector encoding of a character. For example, if we
    have a batch of 8 three-letter words where the last word is "cat", then
    xs[1] will be a node that contains a 1 at position (7, 0). Here the
    index 7 reflects the fact that "cat" is the last word in the batch, and
    the index 0 reflects the fact that the letter "a" is the initial (0th)
    letter of our combined alphabet for this task.

    Your model should use a Recurrent Neural Network to summarize the list
    `xs` into a single node of shape (batch_size x hidden_size), for your
    choice of hidden_size. It should then calculate a node of shape
    (batch_size x 5) containing scores, where higher scores correspond to
    greater probability of the word originating from a particular language.

    Inputs:
        xs: a list with L elements (one per character), where each element
            is a node with shape (batch_size x self.num_chars)
    Returns:
        A node with shape (batch_size x 5) containing predicted scores
        (also called logits)
    """
    "*** YOUR CODE HERE ***"
    # initial hidden state from the first character
    h = nn.ReLU(nn.Linear(xs[0], self.w_in))
    # start at index 1: xs[0] has already been consumed above (starting at
    # 0 would process the first character twice)
    for i in range(1, len(xs)):
        h = nn.ReLU(nn.Add(nn.Linear(xs[i], self.w_in),
                           nn.Linear(h, self.w_hidden)))
    return nn.Linear(h, self.w_out)
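# Hedged sketch of the parameter shapes the RNN run() above implies,
# assuming the CS188-style nn.Parameter; hidden_size = 128 and
# num_chars = 47 are illustrative choices.
def __init__(self):
    self.num_chars = 47
    hidden_size = 128
    self.w_in = nn.Parameter(self.num_chars, hidden_size)   # input -> hidden
    self.w_hidden = nn.Parameter(hidden_size, hidden_size)  # hidden -> hidden
    self.w_out = nn.Parameter(hidden_size, 5)                # hidden -> 5 language scores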
def __init__(self, config, encoder_output_dim, action_dict, ent_dict,
             tri_dict, arg_dict):
    self.config = config
    self.model = pm.global_collection()
    bi_rnn_dim = encoder_output_dim  # config['rnn_dim'] * 2  #+ config['edge_embed_dim']
    lmda_dim = config['lmda_rnn_dim']
    part_ent_dim = config['part_ent_rnn_dim']
    self.lmda_dim = lmda_dim
    self.bi_rnn_dim = bi_rnn_dim

    hidden_input_dim = lmda_dim * 3 + bi_rnn_dim * 2 + config['out_rnn_dim']
    self.hidden_arg = nn.Linear(hidden_input_dim,
                                config['output_hidden_dim'],
                                activation='tanh')
    self.output_arg = nn.Linear(config['output_hidden_dim'], len(arg_dict))

    hidden_input_dim_co = lmda_dim * 3 + bi_rnn_dim * 2 + config['out_rnn_dim']
    self.hidden_ent_corel = nn.Linear(hidden_input_dim_co,
                                      config['output_hidden_dim'],
                                      activation='tanh')
    self.output_ent_corel = nn.Linear(config['output_hidden_dim'], 2)

    self.position_embed = nn.Embedding(500, 20)
    attn_input = self.bi_rnn_dim * 1 + 20 * 2
    self.attn_hidden = nn.Linear(attn_input, 80, activation='tanh')
    self.attn_out = nn.Linear(80, 1)
def main():
    path = get_path(__file__)  # `file` was undefined; `__file__` is the module path
    male, female = loader.read_bmi(path)
    dataset = male
    plot(dataset, title='Data', show=True)

    n_in = len(dataset.inputs[0])
    n_out = len(dataset.outputs[0])
    train_data, test_data = split(dataset, test_size=0.4)

    model = nn.Model(nn.Linear(n_in, 32, nn.sigmoid),
                     nn.Linear(32, 8, nn.sigmoid),
                     nn.Linear(8, n_out, nn.sigmoid))
    bmi = nn.Classifier(model)
    bmi.train(train_data, test_data, target_acc=0.92)

    res = result(bmi, dataset)
    plot(dataset, categories, n_cols=2, title='Data')
    plot(res, categories, title='Prediction', show=True, num=2, n_cols=2)
def run(self, x):
    """
    Runs the model for a batch of examples.

    Your model should predict a node with shape (batch_size x 10),
    containing scores. Higher scores correspond to greater probability of
    the image belonging to a particular class.

    Inputs:
        x: a node with shape (batch_size x 784)
    Output:
        A node with shape (batch_size x 10) containing predicted scores
        (also called logits)
    """
    dotProd1 = nn.Linear(x, self.w0)
    withBias1 = nn.AddBias(dotProd1, self.b0)
    firstLayer = nn.ReLU(withBias1)
    dotProd2 = nn.Linear(firstLayer, self.w1)
    outputLayer = nn.AddBias(dotProd2, self.b1)
    return outputLayer
def run(self, x):
    """
    Runs the model for a batch of examples.

    Your model should predict a node with shape (batch_size x 10),
    containing scores. Higher scores correspond to greater probability of
    the image belonging to a particular class.

    Inputs:
        x: a node with shape (batch_size x 784)
    Output:
        A node with shape (batch_size x 10) containing predicted scores
        (also called logits)
    """
    # Layer 1
    vector = nn.Linear(x, self.w)
    added_bias = nn.AddBias(vector, self.b)
    layer1_output = nn.ReLU(added_bias)
    # Layer 2
    vector2 = nn.Linear(layer1_output, self.w2)
    added_bias2 = nn.AddBias(vector2, self.b2)
    return added_bias2
def run(self, x):
    """
    Runs the model for a batch of examples.

    Inputs:
        x: a node with shape (batch_size x 1)
    Returns:
        A node with shape (batch_size x 1) containing predicted y-values
    """
    biased = None
    "*** YOUR CODE HERE ***"
    step = nn.Linear(x, self.w[0])
    biased = nn.AddBias(step, self.b[0])
    for i in range(self.num_hidden_layers):
        relu = nn.ReLU(biased)
        step = nn.Linear(relu, self.w[i + 1])
        biased = nn.AddBias(step, self.b[i + 1])
    return biased
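# Hedged sketch of the self.w / self.b parameter lists the loop above
# indexes, assuming the CS188-style nn.Parameter; the depth and hidden
# width are illustrative choices.
def __init__(self):
    self.num_hidden_layers = 2
    hidden = 64
    # layer sizes: 1 -> hidden -> ... -> hidden -> 1
    sizes = [1] + [hidden] * self.num_hidden_layers + [1]
    self.w = [nn.Parameter(sizes[i], sizes[i + 1]) for i in range(len(sizes) - 1)]
    self.b = [nn.Parameter(1, sizes[i + 1]) for i in range(len(sizes) - 1)]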
def run(self, x):
    """
    Runs the model for a batch of examples.

    Your model should predict a node with shape (batch_size x 10),
    containing scores. Higher scores correspond to greater probability of
    the image belonging to a particular class.

    Inputs:
        x: a node with shape (batch_size x 784)
    Output:
        A node with shape (batch_size x 10) containing predicted scores
        (also called logits)
    """
    "*** YOUR CODE HERE ***"
    x1 = nn.Linear(x, self.w[0])
    x1 = nn.AddBias(x1, self.b[0])
    activate_x = nn.ReLU(x1)
    x2 = nn.Linear(activate_x, self.w[1])
    pred_y = nn.AddBias(x2, self.b[1])
    return pred_y
def run(self, states):
    """
    Runs the DQN for a batch of states.

    The DQN takes the state and returns the Q-values for all possible
    actions that can be taken. That is, if there are two actions, the
    network takes as input the state s and computes the vector
    [Q(s, a_1), Q(s, a_2)]

    Inputs:
        states: a (batch_size x state_dim) numpy array
    Output:
        result: (batch_size x num_actions) numpy array of Q-value scores,
        for each of the actions
    """
    s_w1 = nn.Linear(states, self.w1)
    relu_input = nn.AddBias(s_w1, self.b1)
    relu = nn.ReLU(relu_input)
    relu_b2 = nn.Linear(relu, self.w2)
    l1l2 = nn.AddBias(relu_b2, self.b2)
    relu_l3 = nn.ReLU(l1l2)
    l3_w3 = nn.Linear(relu_l3, self.w3)
    sol = nn.AddBias(l3_w3, self.b3)
    return sol
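# Hedged companion: a DQN like the one above is typically fit to a target
# Q-value array with a square loss and a single gradient step. The method
# name gradient_update, the learning rate, and the nn.Constant wrapping of
# the numpy Q_target are assumptions about the surrounding nn variant.
def gradient_update(self, states, Q_target):
    loss = nn.SquareLoss(self.run(states), nn.Constant(Q_target))
    params = [self.w1, self.b1, self.w2, self.b2, self.w3, self.b3]
    grads = nn.gradients(loss, params)
    learning_rate = 0.01
    for param, grad in zip(params, grads):
        param.update(grad, -learning_rate)  # descend the loss gradient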
def run(self, x):
    """
    Runs the model for a batch of examples.

    Your model should predict a node with shape (batch_size x 10),
    containing scores. Higher scores correspond to greater probability of
    the image belonging to a particular class.

    Inputs:
        x: a node with shape (batch_size x 784)
    Output:
        A node with shape (batch_size x 10) containing predicted scores
        (also called logits)
    """
    a = nn.Linear(x, self.w1)
    b = nn.AddBias(a, self.b1)
    c = nn.ReLU(b)
    d = nn.Linear(c, self.w2)
    e = nn.AddBias(d, self.b2)
    f = nn.ReLU(e)
    g = nn.Linear(f, self.w3)
    return g
def init_disc_model(state, share=True):
    if share:
        disc_model = nn.Sequential()
    else:
        disc_model = disc_shared_structure(state)
    disc_model.add(
        nn.Linear(state['d_num_filters'] * 4 * 7 * 7, 1,
                  weight=state['d_init'], use_bias=True))
    return disc_model
def run(self, x):
    """
    Runs the model for a batch of examples.

    Your model should predict a node with shape (batch_size x 10),
    containing scores. Higher scores correspond to greater probability of
    the image belonging to a particular class.

    Inputs:
        x: a node with shape (batch_size x 784)
    Output:
        A node with shape (batch_size x 10) containing predicted scores
        (also called logits)
    """
    "*** YOUR CODE HERE ***"
    # f(x) = relu(x * W1 + b1) * W2 + b2
    x_dot_W1 = nn.Linear(x, self.W1)
    with_b1 = nn.AddBias(x_dot_W1, self.b1)
    relu = nn.ReLU(with_b1)
    x_dot_W2 = nn.Linear(relu, self.W2)
    with_b2 = nn.AddBias(x_dot_W2, self.b2)
    return with_b2