def week2(cls):
    """Count inversions in the integer list stored in 'inversion.txt' and print the result."""
    contents = BasicFuncs.load_file_as_string('inversion.txt')
    numbers = [int(entry) for entry in contents.splitlines()]
    sorter = MergeSort(numbers)
    print(sorter.count_inversions())
def problem2(input_file: str):
    """Solve the knapsack problem using O(capacity) memory (rolling row DP).

    File format: first line is "<knapsack_size> <item_count>"; each
    following line is "<value> <weight>" for one item.

    :param input_file: path passed to BasicFuncs.load_file_as_string
    :return: the optimal total value for the full capacity
    """
    lines = BasicFuncs.load_file_as_string(input_file).splitlines()
    catalog = []
    for row in lines[1:]:
        value, weight = row.split(' ')
        catalog.append((int(value), int(weight)))
    capacity, item_count = (int(token) for token in lines[0].split(' '))

    # Base row: only the first item is available.
    base_value, base_weight = catalog[0][0], catalog[0][1]
    previous_row = [base_value if allowed >= base_weight else 0
                    for allowed in range(capacity + 1)]

    for item_index in range(1, item_count):
        item_value, item_weight = catalog[item_index][0], catalog[item_index][1]
        # Below item_weight the item cannot fit, so those cells carry over.
        current_row = previous_row.copy()
        for allowed in range(item_weight, capacity + 1):
            with_item = previous_row[allowed - item_weight] + item_value
            without_item = previous_row[allowed]
            current_row[allowed] = max(without_item, with_item)
        previous_row = current_row
        if item_index % 200 == 0:
            # Progress indicator for large inputs.
            print(item_index)
    return previous_row[-1]
def clustering_1(input_file: str):
    """Single-link clustering down to 4 clusters; return the maximum spacing.

    Edges are merged cheapest-first until 4 clusters remain, then the
    cheapest remaining edge that still crosses two clusters is the answer.
    """
    lines = BasicFuncs.load_file_as_string(input_file).splitlines()
    node_count = int(lines[0])
    edges = []
    for row in lines[1:]:
        node_a, node_b, weight = map(int, row.split(' '))
        edges.append(Edge(node_a, node_b, weight))
    edges.sort(key=lambda e: e.cost)

    union_find = UnionFind(node_count)
    clusters = node_count
    for position, edge in enumerate(edges):
        left, right = edge.start - 1, edge.end - 1
        if union_find.join_two_subsets(left, right):
            clusters -= 1
            if clusters <= 4:
                break

    # Smallest cost among the remaining edges that connect two different
    # clusters is the maximum spacing of the 4-clustering.
    min_max_spacing = float('inf')
    for edge in edges[position + 1:]:
        left, right = edge.start - 1, edge.end - 1
        if not union_find.are_two_indicies_part_of_same_set(left, right):
            if edge.cost < min_max_spacing:
                min_max_spacing = edge.cost
    return min_max_spacing
def test_problem_1_2017(self):
    """Check the AoC 2017 day-1 captcha against the published examples, then print the puzzle answer."""
    cases = {'1122': 3, '1111': 4, '1234': 0, '91212129': 9}
    for captcha, expected in cases.items():
        self.assertEqual(AdventOfCode.problem_1_2017(captcha), expected)
    puzzle = BasicFuncs.load_file_as_string('problem_1_2017.txt').rstrip()
    print(AdventOfCode.problem_1_2017(puzzle))
def assert_file_content_equal_to_string(self, actual_string: str, file_path: str):
    """Assert that the contents of ``file_path`` equal ``actual_string``.

    :param actual_string: expected text
    :param file_path: file whose contents are compared
    """
    expected = BasicFuncs.load_file_as_string(file_path)
    self.assertMultiLineEqual(expected, actual_string)
def week3(cls):
    """Sort 'quick-sort.txt' with median-of-three QuickSort and print the comparison count."""
    raw = BasicFuncs.load_file_as_string('quick-sort.txt')
    values = [int(entry.strip()) for entry in raw.splitlines()]
    sorter = QuickSort(values)
    sorter.sort('med')
    print(sorter.comparisons)
def get_med_sum(file_path: str) -> int:
    """Stream the integers in ``file_path`` through the median-maintenance
    solver and return the sum of all running medians, modulo 10000."""
    contents = BasicFuncs.load_file_as_string(file_path)
    solver = ProgrammingAssignment1()
    running_total = 0
    for row in contents.splitlines():
        solver.add_new_element(int(row))
        running_total += solver.get_median()
    return running_total % 10000
def problem3(cls, input_file: str):
    """Max-weight independent set: return a bit string telling, for vertices
    1, 2, 3, 4, 17, 117, 517 and 997, whether each is in the optimal subset."""
    contents = BasicFuncs.load_file_as_string(input_file)
    # Zero-based positions of the vertices to report on.
    probe_indices = [0, 1, 2, 3, 16, 116, 516, 996]
    weights = [int(row) for row in contents.splitlines()[1:]]
    max_weight, chosen = cls.rec_get_max_weight(weights, len(weights) - 1)
    return ''.join('1' if index in chosen else '0' for index in probe_indices)
def load_graph_from_file(file_path: str):
    """Load an adjacency-list graph from a whitespace-separated file.

    Each line holds a node id followed by the ids it connects to.

    :param file_path: path passed to BasicFuncs.load_file_as_string
    :return: (adjacency dict of node -> list of neighbours, undirected
             edge count)
    """
    s = BasicFuncs.load_file_as_string(file_path)
    adj_set_list = {}
    num_of_edges = 0
    for line in s.splitlines():
        # BUG FIX: use a raw string — in '\t|\s' the '\s' is an invalid
        # escape in a plain literal (SyntaxWarning on modern Python).
        nums = re.split(r'\t|\s', line)
        node = int(nums[0])
        adj_set_list[node] = []
        for token in nums[1:]:
            if token == '':
                # re.split leaves empty strings between adjacent separators.
                continue
            adj_set_list[node].append(int(token))
            num_of_edges += 1
    # Each undirected edge appears once per endpoint; use integer division
    # so the count stays an int (original '/' produced a float).
    num_of_edges //= 2
    return adj_set_list, num_of_edges
def prims_algorithm(edges_file: str):
    """Compute the MST total cost with lazy Prim's algorithm.

    File format: first line "<num_nodes> <num_edges>", then one
    "<u> <v> <cost>" line per undirected (possibly parallel) edge.
    """
    lines = BasicFuncs.load_file_as_string(edges_file).splitlines()
    num_of_nodes, num_of_edges = map(int, lines[0].split(' '))

    graph = {}

    def _record(u, v, cost):
        # Keep only the cheapest of any parallel edges between u and v.
        neighbours = graph.setdefault(u, {})
        if v in neighbours:
            neighbours[v] = min(neighbours[v], cost)
        else:
            neighbours[v] = cost

    for row in lines[1:]:
        u, v, cost = map(int, row.split(' '))
        _record(u, v, cost)
        _record(v, u, cost)

    # Lazy Prim: start from node 1, repeatedly take the cheapest edge
    # reaching an unseen node; stale queue entries are skipped.
    seen = set()
    frontier = PriorityQueue()
    frontier.put((0, 1))
    mst_sum = 0
    while len(seen) < num_of_nodes:
        attach_cost, node = frontier.get()
        if node in seen:
            continue  # stale entry
        seen.add(node)
        mst_sum += attach_cost
        for neighbour, edge_cost in graph[node].items():
            if neighbour not in seen:
                frontier.put((edge_cost, neighbour))
    return mst_sum
def kruskals_algorithm(edges_file: str):
    """Compute the MST total cost with Kruskal's algorithm and union-find.

    File format: first line "<num_nodes> <num_edges>", then one
    "<u> <v> <cost>" line per undirected edge.
    """
    lines = BasicFuncs.load_file_as_string(edges_file).splitlines()
    num_of_nodes, num_of_edges = map(int, lines[0].split(' '))
    union_find = UnionFind(num_of_nodes)

    edges = []
    for row in lines[1:]:
        node_a, node_b, weight = map(int, row.split(' '))
        edges.append(Edge(node_a, node_b, weight))
    edges.sort(key=lambda e: e.cost)

    # Take each edge cheapest-first; union-find rejects cycle-forming ones.
    total_cost = 0
    for edge in edges:
        if union_find.join_two_subsets(edge.start - 1, edge.end - 1):
            total_cost += edge.cost
    return total_cost
def scheduling_app(cls, file_name: str):
    """Weighted job scheduling: return the weighted completion-time sums for
    two greedy orderings.

    Part 1 orders jobs by decreasing (weight - length), breaking ties by
    weight; part 2 orders by decreasing weight/length ratio.

    :return: [part1 sum, part2 sum]
    """
    raw = BasicFuncs.load_file_as_string(file_name)
    jobs = []
    for row in raw.splitlines()[1:]:
        weight, length = (int(token) for token in row.split(' '))
        # (weight, length, difference, ratio) — the greedy keys are precomputed.
        jobs.append((weight, length, weight - length, weight / length))

    # Part 1: decreasing (difference, weight).
    jobs.sort(key=lambda job: (job[2], job[0]))
    jobs.reverse()
    decreasing_times = cls.get_weighted_sums(jobs)

    # Part 2: decreasing ratio.
    jobs.sort(key=lambda job: job[3])
    jobs.reverse()
    ratio_times = cls.get_weighted_sums(jobs)

    return [decreasing_times, ratio_times]
def apply_huffman(input_file: str) -> HuffmanNode:
    """Build a Huffman tree from the symbol weights listed in ``input_file``.

    The first line (symbol count) is skipped; each following line holds one
    symbol weight. Returns the root node; its ``max_depth``/``min_depth``
    give the longest/shortest codeword lengths.

    :param input_file: path passed to BasicFuncs.load_file_as_string
    :return: root HuffmanNode of the completed tree
    """
    s = BasicFuncs.load_file_as_string(input_file)
    h = []
    for line in s.splitlines()[1:]:
        weight = int(line)
        heapq.heappush(h, HuffmanNode(line, weight))
    # Repeatedly merge the two lightest trees until one remains.
    # IDIOM FIX: len(h), not h.__len__().
    while len(h) > 1:
        first_node = heapq.heappop(h)
        sec_node = heapq.heappop(h)
        # Depths grow by one on each merge; track both extremes.
        new_depth = max(first_node.max_depth, sec_node.max_depth) + 1
        new_min_depth = min(first_node.min_depth, sec_node.min_depth) + 1
        meta_node = HuffmanNode(None, first_node.freq + sec_node.freq,
                                max_depth=new_depth, min_depth=new_min_depth)
        meta_node.left = first_node
        meta_node.right = sec_node
        heapq.heappush(h, meta_node)
    root = heapq.heappop(h)
    return root
def problem1(input_file: str):
    """Solve the knapsack problem with the full (items x capacity) DP table.

    File format: first line is "<knapsack_size> <item_count>"; each
    following line is "<value> <weight>" for one item.

    :param input_file: path passed to BasicFuncs.load_file_as_string
    :return: the optimal total value for the full capacity
    """
    lines = BasicFuncs.load_file_as_string(input_file).splitlines()
    catalog = []
    for row in lines[1:]:
        value, cost = row.split(' ')
        catalog.append((int(value), int(cost)))
    max_weight, num_of_items = map(int, lines[0].split(' '))

    # Row 0: best value achievable using only the first item.
    first_value, first_weight = catalog[0]
    matrix = [[first_value if allowed >= first_weight else 0
               for allowed in range(max_weight + 1)]]

    for item_index in range(1, num_of_items):
        current_value, current_weight = catalog[item_index]
        prev_row = matrix[item_index - 1]
        item_vector = []
        for weight_allowed in range(max_weight + 1):
            excluded = prev_row[weight_allowed]
            if weight_allowed < current_weight:
                # Item cannot fit at this capacity.
                item_vector.append(excluded)
            else:
                included = prev_row[weight_allowed - current_weight] + current_value
                item_vector.append(max(excluded, included))
        matrix.append(item_vector)

    # BUG FIX: the original returned item_vector[-1], which is unbound
    # (NameError) when num_of_items == 1; it also carried a dead
    # optimal_value accumulator that was never used. matrix[-1][-1] is
    # the optimum for all items at full capacity in every case.
    return matrix[-1][-1]
def save_cookies(self, path_to_save: str):
    """Serialize the browser's current cookies to ``path_to_save`` as a
    JSON object mapping cookie name to value."""
    jar = {cookie['name']: cookie['value'] for cookie in self.get_cookies()}
    BasicFuncs.write_to_file(path_to_save, json.dumps(jar))
                    stack.append(conn)
                    break
            else:
                # NOTE(review): for-else — no unexplored neighbour was
                # pushed, so the node on top of the stack is finished;
                # record it. (Indentation reconstructed from a collapsed
                # line — the enclosing method starts outside this view.)
                last = stack.pop()
                self.finishing_order.append(last)

    def solve_problem(self):
        """Run both DFS passes: pass 1 on the reversed edges, then pass 2.

        Presumably Kosaraju's SCC algorithm — TODO confirm against the
        rest of the class.
        """
        reversed_edges = self.reverse_edges()
        self.run_dfs_pass1(reversed_edges)
        self.run_dfs_pass2()


if __name__ == '__main__':
    # Build a directed graph from 'task-1.txt': one "<node> <connect>"
    # pair per line; every vertex mentioned gets an entry in the dict.
    graph = {}
    s = BasicFuncs.load_file_as_string('task-1.txt')
    for line in s.splitlines():
        line = line.rstrip()
        node, connect = line.split(' ')
        node = int(node)
        connect = int(connect)
        if node in graph:
            graph[node].add(connect)
        else:
            graph[node] = {connect}
        # Ensure sink-only vertices still appear in the adjacency dict.
        if connect not in graph:
            graph[connect] = set()
    print('Done loading from file')
    a = Assignment1(graph)
    a.solve_problem()
def test_problem2(self):
    """Check AoC problem 2 against the sample spreadsheet, then print the puzzle answer."""
    sample = '5 1 9 5\n7 5 3\n2 4 6 8'
    self.assertEqual(AdventOfCode.problem2(sample), 18)
    puzzle = BasicFuncs.load_file_as_string('problem2.txt')
    print(AdventOfCode.problem2(puzzle))
def clustering_2(input_file: str):
    """Clustering, part 2.

    NOTE(review): unimplemented stub — it only parses the node count from
    the first line of ``input_file`` and returns None.
    """
    lines = BasicFuncs.load_file_as_string(input_file).splitlines()
    # Node count is read but not yet used.
    nodes = int(lines[0])
    return
def load_file_as_array(file_path: str):
    """Read ``file_path`` and return its lines as a list of ints."""
    contents = BasicFuncs.load_file_as_string(file_path)
    return [int(entry) for entry in contents.splitlines()]
def test_load_json_file(self):
    """load_json_file should parse the (patched) file's JSON into a dict."""
    file_data = '{"key": "value"}'
    with self.get_file_open_patch(file_data):
        parsed = BasicFuncs.load_json_file('does-not-matter')
        expected = {'key': 'value'}
        self.assertEqual(parsed, expected)
def load_cookies(self, cookies_json_path: str):
    """Load cookies from a JSON file and install them in the browser.

    :param cookies_json_path: path to a JSON object of name -> value
    :return: the mapping that was loaded
    """
    cookie_map = BasicFuncs.load_json_file(cookies_json_path)
    for name, value in cookie_map.items():
        self.add_cookie({'name': name, 'value': value})
    return cookie_map