def test():
    data = """1-3 a: abcde
1-3 b: cdefg
2-9 c: ccccccccc"""
    run_tests(p1, [(data, 2)])
    run_tests(p2, [(data, 1)])
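# `p1` and `p2` themselves are not shown here. As a hedged illustration only:
# the sample rows and the expected counts (2 for p1, 1 for p2) match the
# Advent of Code 2020 day 2 password-policy puzzle, under which p1 counts
# passwords where the letter occurs between lo and hi times, and p2 counts
# passwords where exactly one of the 1-based positions lo and hi holds the
# letter. The sketches below implement that assumed semantics; they are not
# this repository's code.
def p1_sketch(data):
    valid = 0
    for line in data.strip().splitlines():
        policy, letter, password = line.split()
        lo, hi = map(int, policy.split('-'))
        if lo <= password.count(letter[0]) <= hi:
            valid += 1
    return valid

def p2_sketch(data):
    valid = 0
    for line in data.strip().splitlines():
        policy, letter, password = line.split()
        lo, hi = map(int, policy.split('-'))
        # Exactly one of the two positions may contain the letter (XOR).
        if (password[lo - 1] == letter[0]) != (password[hi - 1] == letter[0]):
            valid += 1
    return valid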
def main(reg_lambda, learning_rate, loss_function, regulariser,
         niterations=10, enable_test_set_scoring=False, **kwargs):
    global data

    # Type of features to use. This can be set to 'bigram' or 'unigram+bigram' to use
    # bigram features instead of or in addition to unigram features.
    # Not required for assignment.
    feature_type = 'unigram'

    # First test the parts to be implemented and warn if something's wrong.
    print('=============')
    print('SANITY CHECKS')
    print('=============')
    print()
    util.run_tests()

    # Load the data.
    training_data, val_data, test_data, data = load_data(feature_type)

    # Train the classifier.
    print('Starting training.')
    weights, bias, training_log = train(training_data, val_data, loss_function,
                                        regulariser, reg_lambda, learning_rate,
                                        niterations)
    print('Training completed.')
    print()
    print('=====================')
    print('MODEL CHARACTERISTICS')
    print('=====================')
    print()

    # Display some useful statistics about the model and the training process.
    title = 'Data set: %s - Regulariser(%s): %g - Learning rate: %g ' \
            '- Loss Function: %s' % (
                data.name, regulariser, reg_lambda, learning_rate, loss_function)
    print()

    # Get final accuracy on the validation set.
    val_predictions = predict(weights, bias, val_data)
    val_accuracy = accuracy(val_data.labels, val_predictions)
    print('Accuracy: %g' % val_accuracy)

    util.show_stats(title, training_log, weights, bias, data.vocabulary, top_n=1,
                    write_to_file="results.csv",
                    configuration={
                        'reg_lambda': reg_lambda,
                        'learning_rate': learning_rate,
                        'loss_function': loss_function,
                        'regulariser': regulariser,
                        'niterations': niterations,
                        'val_accuracy': val_accuracy
                    })
    util.create_plots(title, training_log, weights,
                      log_keys=['training_loss_reg', 'val_loss'])

    if enable_test_set_scoring:
        # Check the performance on the test set.
        test_loss = loss_function.unregularised_loss(weights, bias, test_data)
        test_predictions = predict(weights, bias, test_data)
        test_accuracy = accuracy(test_data.labels, test_predictions)
        print()
        print('====================')
        print('TEST SET PERFORMANCE')
        print('====================')
        print()
        print('Test loss: %g' % test_loss)
        print('Test accuracy: %g' % test_accuracy)
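# The script above relies on `predict` and `accuracy` helpers that are defined
# elsewhere. As a rough sketch only (the array shapes and the 0/1 label
# convention are assumptions, not this repository's implementation): a linear
# classifier scores each example as x.w + b and thresholds the score at zero.
import numpy as np

def predict_sketch(weights, bias, features):
    # features: one row per example; positive score -> class 1, else class 0.
    return (features @ weights + bias > 0).astype(int)

def accuracy_sketch(labels, predictions):
    # Fraction of examples whose predicted label matches the gold label.
    return float(np.mean(np.asarray(labels) == np.asarray(predictions)))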
def main():
    # TRAINING HYPERPARAMETERS
    # Modify the following lines to change the training hyperparameters.

    # Regularisation strength
    reg_lambda = 0.0

    # Learning rate
    learning_rate = 0.07

    # Number of training iterations
    niterations = 100

    # Loss function to use (select one and comment out the other)
    loss_function = LogisticLoss()
    # loss_function = HingeLoss()

    # Type of regularisation to use (select one and comment out the other)
    # regulariser = L1Regulariser()
    regulariser = L2Regulariser()

    # This should only be enabled once you've decided on a final set of
    # hyperparameters.
    enable_test_set_scoring = True

    # Controls whether the perceptron trainer is used instead of
    # gradient-descent training.
    enable_perceptron = True

    # Type of features to use. This can be set to 'bigram' or 'unigram+bigram' to use
    # bigram features instead of or in addition to unigram features.
    # Not required for assignment.
    feature_type = 'unigram'

    # END OF HYPERPARAMETERS

    # First test the parts to be implemented and warn if something's wrong.
    print('=============')
    print('SANITY CHECKS')
    print('=============')
    print()
    util.run_tests()

    # Load the data.
    print()
    print('===================')
    print('CLASSIFIER TRAINING')
    print('===================')
    print()
    print('Loading data sets...')
    data_dir = './poldata/poldata.zip'
    data = util.load_movie_data(data_dir)
    data.select_feature_type(feature_type)

    # Split the data set randomly into training, validation and test sets.
    training_data, val_data, test_data = data.train_val_test_split()

    # Train the classifier.
    print('Starting training.')
    if enable_perceptron:
        weights, bias, training_log = train_perceptron(training_data, val_data,
                                                       niterations)
    else:
        weights, bias, training_log = train(training_data, val_data, loss_function,
                                            regulariser, reg_lambda, learning_rate,
                                            niterations)
    print('Training completed.')
    print()
    print('=====================')
    print('MODEL CHARACTERISTICS')
    print('=====================')
    print()

    # Display some useful statistics about the model and the training process.
    title = 'Data set: %s - Regulariser: %g - Learning rate: %g' % (
        data.name, reg_lambda, learning_rate)
    print()
    util.show_stats(title, training_log, weights, bias, data.vocabulary, top_n=20)
    util.create_plots(title, training_log, weights,
                      log_keys=['training_loss_reg', 'val_loss'])

    if enable_test_set_scoring:
        # Check the performance on the test set.
        test_loss = loss_function.unregularised_loss(weights, bias, test_data)
        test_predictions = predict(weights, bias, test_data)
        test_accuracy = accuracy(test_data.labels, test_predictions)
        print()
        print('====================')
        print('TEST SET PERFORMANCE')
        print('====================')
        print()
        print('Test loss: %g' % test_loss)
        print('Test accuracy: %g' % test_accuracy)
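# For reference, a minimal sketch of the classic perceptron update that
# `train_perceptron` above presumably performs on each example (the +1/-1
# label convention and the helper name are assumptions, not this repository's
# code): on a misclassified example, move the weights towards that example.
def perceptron_step_sketch(weights, bias, x, y):
    # weights and x are plain lists of floats; y is +1 or -1.
    score = sum(w * xi for w, xi in zip(weights, x)) + bias
    y_hat = 1 if score > 0 else -1
    if y_hat != y:
        # Misclassified: shift the decision boundary towards the example.
        weights = [w + y * xi for w, xi in zip(weights, x)]
        bias = bias + y
    return weights, bias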
            c.add(s[j])
            j += 1
            l = max(l, j - i)
        else:
            c.remove(s[i])
            i += 1
    return l


def longestSubstringWithoutRepeatingCharacters_slidingWindowOptimized(s):
    # Optimised sliding window: instead of shrinking the window one character
    # at a time, jump the window start `i` directly past the previous
    # occurrence of the repeated character, so each character is visited once.
    n = len(s)
    last_seen = dict()  # character -> index just past its most recent occurrence
    i, l = 0, 0
    for j in range(n):
        if s[j] in last_seen:
            # Never move the window start backwards.
            i = max(last_seen[s[j]], i)
        l = max(l, j - i + 1)
        last_seen[s[j]] = j + 1
    return l


if __name__ == "__main__":
    solutions = [
        longestSubstringWithoutRepeatingCharacters_bruteForce,
        longestSubstringWithoutRepeatingCharacters_slidingWindow,
        longestSubstringWithoutRepeatingCharacters_slidingWindowOptimized
    ]
    tests = [(("abcabcbb", ), 3), (("bbbbb", ), 1), (("pwwkew", ), 3),
             (("", ), 0)]
    util.run_tests(solutions, tests)
#!/usr/bin/env python

from util import run_tests
from test_suite import *

if __name__ == "__main__":
    tests = arithmetic_tests + \
        mod_tests + \
        arithmetic_boolean_tests + \
        nary_arithmetic_tests + \
        bitwise_tests + \
        boolean_tests + \
        nary_boolean_tests + \
        const_arith_tests + \
        const_bool_tests + \
        ite_tests + \
        div_tests + \
        array_tests + \
        c_array_tests + \
        misc_tests + \
        biomatch_tests + \
        kmeans_tests
    # shift_tests
    # TODO: add support for return value - int promotion
    # unsigned_arithmetic_tests + \

    run_tests('c', tests)
#!/usr/bin/env python

from util import run_tests
from test_suite import *

if __name__ == "__main__":
    tests = arithmetic_tests + \
        arithmetic_boolean_tests + \
        nary_arithmetic_tests + \
        bitwise_tests + \
        boolean_tests + \
        nary_boolean_tests + \
        const_arith_tests + \
        const_bool_tests + \
        loop_tests + \
        ite_tests + \
        function_tests + \
        misc_tests
    # shift_tests + \
    # arr_tests + \

    run_tests('zok', tests)
        L -= 1
        R += 1
    return R - L - 1


def longestPalindromicSubstring_expandAroundCenter(s):
    if len(s) == 0:
        return s
    n = len(s)
    start, end = 0, 0
    for i in range(n):
        # Expand around both an odd-length centre (i, i) and an even-length
        # centre (i, i + 1), and keep the longer of the two palindromes.
        l1 = expandAroundCenter(s, i, i)
        l2 = expandAroundCenter(s, i, i + 1)
        l = max(l1, l2)
        if l > end - start:
            start = i - (l - 1) // 2
            end = i + l // 2
    return s[start:end + 1]


if __name__ == "__main__":
    solutions = [
        longestPalindromicSubstring_bruteForce,
        longestPalindromicSubstring_longestCommonSubstring,
        longestPalindromicSubstring_dynamicProgramming,
        longestPalindromicSubstring_expandAroundCenter
    ]
    # Several inputs admit more than one longest palindrome, so the tests
    # compare lengths rather than exact strings.
    tests = [(("babad", ), "bab"), (("cbbd", ), "bb"), (("a", ), "a"),
             (("ac", ), "a"), (("bb", ), "bb"), (("aacabdkacaa", ), "aca")]
    util.run_tests(solutions, tests, equals=lambda a, b: len(a) == len(b))
        elif l2 is None:
            # l2 is exhausted: only the carry is left to propagate through l1.
            l1.val += remainder
            remainder = l1.val // 10
            l1.val %= 10
            if remainder == 0:
                break
            l1_prev = l1
            l1 = l1.next
        else:
            # Both lists still have digits: add them in place into l1.
            l1.val += l2.val + remainder
            remainder = l1.val // 10
            l1.val %= 10
            l1_prev = l1
            l1 = l1.next
            l2 = l2.next
    if remainder != 0 and l1_prev is not None:
        # A carry survives past the last digit: append a new node for it.
        l1_prev.next = ListNode(remainder)
    return root


if __name__ == "__main__":
    solutions = [addTwoNumbers]
    tests = [(([2, 4, 3], [5, 6, 4]), [7, 0, 8]),
             (([0], [0]), [0]),
             (([9, 9, 9, 9, 9, 9, 9], [9, 9, 9, 9]), [8, 9, 9, 9, 0, 0, 0, 1]),
             (([2, 4, 9], [5, 6, 4, 9]), [7, 0, 4, 0, 1])]
    # Convert the plain-list fixtures into linked lists before running.
    for i, (inputs, expected) in enumerate(tests):
        inputs = tuple(ListNode.toLinkedList(x) for x in inputs)
        expected = ListNode.toLinkedList(expected)
        tests[i] = (inputs, expected)
    util.run_tests(solutions, tests, equals=ListNode.equals)