# Module-level context assumed by both checks below: numpy, signal,
# sys.platform, and the course-provided nn/backend modules. The helpers
# verify_node, trace_node, and the SIGALRM handler are defined elsewhere
# in the autograder.
import signal
from sys import platform

import numpy as np

import backend
import nn


def check_regression(tracker):
    import models
    model = models.RegressionModel(1)
    dataset = backend.RegressionDataset(model)

    # Run the model at several batch sizes and record the parameters it uses.
    detected_parameters = None
    for batch_size in (1, 2, 4):
        inp_x = nn.Constant(dataset.x[:batch_size])
        inp_y = nn.Constant(dataset.y[:batch_size])
        output_node = model.run(inp_x)
        verify_node(output_node, 'node', (batch_size, 1), "RegressionModel.run()")
        trace = trace_node(output_node)
        assert inp_x in trace, "Node returned from RegressionModel.run() does not depend on the provided input (x)"

        if detected_parameters is None:
            detected_parameters = [
                node for node in trace if isinstance(node, nn.Parameter)
            ]
        for node in trace:
            assert not isinstance(node, nn.Parameter) or node in detected_parameters, (
                "Calling RegressionModel.run() multiple times should always "
                "re-use the same parameters, but a new nn.Parameter object was detected")

    # get_loss() must depend on both inputs and introduce no new parameters.
    for batch_size in (1, 2, 4):
        inp_x = nn.Constant(dataset.x[:batch_size])
        inp_y = nn.Constant(dataset.y[:batch_size])
        loss_node = model.get_loss(inp_x, inp_y)
        verify_node(loss_node, 'loss', None, "RegressionModel.get_loss()")
        trace = trace_node(loss_node)
        assert inp_x in trace, "Node returned from RegressionModel.get_loss() does not depend on the provided input (x)"
        assert inp_y in trace, "Node returned from RegressionModel.get_loss() does not depend on the provided labels (y)"
        for node in trace:
            assert not isinstance(node, nn.Parameter) or node in detected_parameters, (
                "RegressionModel.get_loss() should not use additional "
                "parameters not used by RegressionModel.run()")

    tracker.add_points(2)  # Partial credit for passing sanity checks

    model.train(dataset)
    backend.maybe_sleep_and_close(1)

    train_loss = model.get_loss(nn.Constant(dataset.x), nn.Constant(dataset.y))
    verify_node(train_loss, 'loss', None, "RegressionModel.get_loss()")
    train_loss = nn.as_scalar(train_loss)

    # Re-compute the loss ourselves: otherwise get_loss() could be hard-coded
    # to always return zero
    train_predicted = model.run(nn.Constant(dataset.x))
    verify_node(train_predicted, 'node', (dataset.x.shape[0], 1), "RegressionModel.run()")
    sanity_loss = 0.5 * np.mean((train_predicted.data - dataset.y) ** 2)
    assert np.isclose(train_loss, sanity_loss), (
        "RegressionModel.get_loss() returned a loss of {:.4f}, "
        "but the autograder computed a loss of {:.4f} "
        "based on the output of RegressionModel.run()".format(
            train_loss, sanity_loss))

    loss_threshold = 0.15
    if train_loss <= loss_threshold:
        print("Your final loss is: {:f}".format(train_loss))
        tracker.add_points(4)
    else:
        print("Your final loss ({:f}) must be no more than {:.4f} to receive "
              "full points for this question".format(train_loss, loss_threshold))

    # Compare the student's closed-form solution against numpy's least-squares
    # solver. For the design matrix [x, 1], lstsq returns [slope, intercept],
    # so b1 is checked against m (slope) and b0 against c (intercept).
    b0, b1 = model.closedFormSolution(list(dataset.x.flatten()),
                                      list(dataset.y.flatten()))
    x = dataset.x.flatten()
    y = dataset.y.flatten()
    A = np.vstack([x, np.ones(len(x))]).T
    m, c = np.linalg.lstsq(A, y, rcond=None)[0]
    if np.isclose(b0, c) and np.isclose(b1, m):
        tracker.add_points(2)
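# For reference, a minimal sketch of the closed-form solver the check above
# exercises. This is NOT the course's models.py: the class name is
# hypothetical, and the (intercept, slope) return order is an assumption
# inferred from how the autograder unpacks and compares (b0, b1). It solves
# simple linear regression via the normal equations:
#     slope = cov(x, y) / var(x),  intercept = mean(y) - slope * mean(x)
class _ReferenceRegressionModel:
    """Hypothetical stand-in illustrating the expected closedFormSolution."""

    def closedFormSolution(self, xs, ys):
        x = np.asarray(xs, dtype=float)
        y = np.asarray(ys, dtype=float)
        x_mean, y_mean = x.mean(), y.mean()
        # Slope: sample covariance of (x, y) divided by sample variance of x.
        slope = np.sum((x - x_mean) * (y - y_mean)) / np.sum((x - x_mean) ** 2)
        # Intercept: the fitted line passes through (x_mean, y_mean).
        intercept = y_mean - slope * x_mean
        return intercept, slope  # matches the autograder's (b0, b1) unpacking


if __name__ == "__main__":
    # Quick self-check on y = 2x + 1: should print approximately (1.0, 2.0).
    print(_ReferenceRegressionModel().closedFormSolution(
        [0.0, 1.0, 2.0, 3.0], [1.0, 3.0, 5.0, 7.0]))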
# A second variant of the same check: RegressionModel takes no constructor
# argument, training runs under a 240-second timeout on POSIX platforms, and
# the loss threshold is tighter (0.02). If both definitions live in the same
# module, this one shadows the first.
def check_regression(tracker):
    import models
    model = models.RegressionModel()
    dataset = backend.RegressionDataset(model)

    # Run the model at several batch sizes and record the parameters it uses.
    detected_parameters = None
    for batch_size in (1, 2, 4):
        inp_x = nn.Constant(dataset.x[:batch_size])
        inp_y = nn.Constant(dataset.y[:batch_size])
        output_node = model.run(inp_x)
        verify_node(output_node, 'node', (batch_size, 1), "RegressionModel.run()")
        trace = trace_node(output_node)
        assert inp_x in trace, "Node returned from RegressionModel.run() does not depend on the provided input (x)"

        if detected_parameters is None:
            detected_parameters = [
                node for node in trace if isinstance(node, nn.Parameter)
            ]
        for node in trace:
            assert not isinstance(node, nn.Parameter) or node in detected_parameters, (
                "Calling RegressionModel.run() multiple times should always "
                "re-use the same parameters, but a new nn.Parameter object was detected")

    # get_loss() must depend on both inputs and introduce no new parameters.
    for batch_size in (1, 2, 4):
        inp_x = nn.Constant(dataset.x[:batch_size])
        inp_y = nn.Constant(dataset.y[:batch_size])
        loss_node = model.get_loss(inp_x, inp_y)
        verify_node(loss_node, 'loss', None, "RegressionModel.get_loss()")
        trace = trace_node(loss_node)
        assert inp_x in trace, "Node returned from RegressionModel.get_loss() does not depend on the provided input (x)"
        assert inp_y in trace, "Node returned from RegressionModel.get_loss() does not depend on the provided labels (y)"
        for node in trace:
            assert not isinstance(node, nn.Parameter) or node in detected_parameters, (
                "RegressionModel.get_loss() should not use additional "
                "parameters not used by RegressionModel.run()")

    tracker.add_points(2)  # Partial credit for passing sanity checks

    # Train with a timeout where SIGALRM is available; see
    # https://stackoverflow.com/questions/492519/timeout-on-a-function-call
    time_out = False
    if platform in ("linux", "linux2", "darwin"):  # Linux or OS X
        signal.signal(signal.SIGALRM, handler)
        signal.alarm(240)
        try:
            model.train(dataset)
        except Exception as exc:
            print(exc)
            time_out = True
        signal.alarm(0)
    elif platform == "win32":  # Windows
        print("Using Windows, no automatic timeout included")
        model.train(dataset)

    if not time_out:
        # The model has already been trained in the platform branches above.
        backend.maybe_sleep_and_close(1)

        train_loss = model.get_loss(nn.Constant(dataset.x), nn.Constant(dataset.y))
        verify_node(train_loss, 'loss', None, "RegressionModel.get_loss()")
        train_loss = nn.as_scalar(train_loss)

        # Re-compute the loss ourselves: otherwise get_loss() could be
        # hard-coded to always return zero
        train_predicted = model.run(nn.Constant(dataset.x))
        verify_node(train_predicted, 'node', (dataset.x.shape[0], 1), "RegressionModel.run()")
        sanity_loss = 0.5 * np.mean((train_predicted.data - dataset.y) ** 2)
        assert np.isclose(train_loss, sanity_loss), (
            "RegressionModel.get_loss() returned a loss of {:.4f}, "
            "but the autograder computed a loss of {:.4f} "
            "based on the output of RegressionModel.run()".format(
                train_loss, sanity_loss))

        loss_threshold = 0.02
        if train_loss <= loss_threshold:
            print("Your final loss is: {:f}".format(train_loss))
            tracker.add_points(4)
        else:
            print("Your final loss ({:f}) must be no more than {:.4f} to "
                  "receive full points for this question".format(
                      train_loss, loss_threshold))
    else:
        print("Your training timed out, therefore no final loss to report and test.")
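# The timeout path above relies on a module-level `handler` for SIGALRM that
# this section does not define. A minimal sketch of what it presumably looks
# like follows; the exact exception type and message are assumptions. Any
# callable that raises works, because the except block catches Exception and
# sets time_out. Raising inside a SIGALRM handler is the standard way to
# interrupt a long-running call on POSIX systems.
def handler(signum, frame):
    """Raise so model.train() is interrupted once the 240-second alarm fires."""
    raise TimeoutError("model.train() exceeded the training time limit")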