def test_claim_overlap():
    c1 = Claim.new_from_string("#1 @ 1,3: 4x4")
    c2 = Claim.new_from_string("#2 @ 3,1: 4x4")
    c3 = Claim.new_from_string("#3 @ 5,5: 2x2")
    c4 = Claim.new_from_string("#4 @ 1,1: 1x1")
    assert c1.overlaps(c2)
    assert c2.overlaps(c1)
    assert not c1.overlaps(c3)
    assert c4.overlaps(c4)

def _no_overlap(claim: Claim, claims: List[Claim]) -> bool:
    for other in claims:
        if claim == other:
            continue
        if claim.overlaps(other):
            return False
    return True

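# A minimal sketch of the rectangle-intersection test that overlaps() above
# presumably performs; it would live on the Claim class. The _x/_y/_width/
# _height attribute names come from test_claim_new_from_string() below, but
# the body itself is an assumption, not this repo's actual implementation.
def overlaps(self, other):
    # Two axis-aligned rectangles overlap unless one lies entirely to the
    # left of, or entirely above, the other.
    return not (self._x + self._width <= other._x or
                other._x + other._width <= self._x or
                self._y + self._height <= other._y or
                other._y + other._height <= self._y)
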
def matches_with_decrypt(triple, decrypt_fn):
    (sub, pre, obj) = triple
    logging.debug(f'Find matches for ({sub}, {pre}, {obj})')
    # Protect against bad requests for (None, None, None) or (None, ?, None).
    if not sub and not obj:
        raise RuntimeError(
            'Searching for (None, None, None) or (None, ?, None) is prohibited'
        )
    # Get the matching pointers.
    pointer_pattern = patterns.make_pattern(sub, pre, obj)
    pointers_raw = dht.get_pointers(pointer_pattern)
    # Keep only the pointers we can decrypt.
    pointers = map(lambda p: decrypt_fn(sub, pre, obj, p), pointers_raw)
    claims = {}
    for pointer in pointers:
        if pointer:
            claim = Claim.from_hex(dht.get_claim(pointer.id), pointer.key)
            logging.info(
                f'Claim found = {pointer.id[0:10]}...{pointer.id[-10:]}')
            claims[claim.get_id()] = claim
    # Topological sort + return.
    return kahnsort(claims.values())

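# kahnsort() above is referenced but not shown. This is a minimal sketch of
# Kahn's topological-sort algorithm over claims, assuming a hypothetical
# claim.parents() accessor that returns the claims a claim depends on; the
# real dependency accessor in this codebase may differ.
from collections import deque

def kahnsort(claims):
    claims = list(claims)
    # Count unresolved dependencies for each claim and record its children.
    indegree = {c: 0 for c in claims}
    children = {c: [] for c in claims}
    for c in claims:
        for parent in c.parents():
            if parent in children:
                children[parent].append(c)
                indegree[c] += 1
    # Repeatedly emit claims with no unresolved dependencies.
    ready = deque(c for c in claims if indegree[c] == 0)
    ordered = []
    while ready:
        c = ready.popleft()
        ordered.append(c)
        for child in children[c]:
            indegree[child] -= 1
            if indegree[child] == 0:
                ready.append(child)
    return ordered
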
def main():
    fabric_length = 1000
    fabric = np.zeros([fabric_length, fabric_length], dtype="U5")
    duplicated_fabric = 0
    claims = []
    with open("input.txt") as file:
        claims_input = file.readlines()
    for claim_input in claims_input:
        split_claim = re.split(' @ |,|: |x', claim_input.strip())
        claim = Claim(split_claim[0], int(split_claim[1]), int(split_claim[2]),
                      int(split_claim[3]), int(split_claim[4]))
        claims.append(claim)
    for claim in claims:
        for x, y in product(
                range(claim.left_space, claim.left_space + claim.width),
                range(claim.top_space, claim.top_space + claim.height)):
            if '#' in fabric[x][y]:
                duplicated_fabric += 1
                fabric[x][y] = 'xx'
            elif fabric[x][y] == '':
                fabric[x][y] = claim.claim_id
    for claim in claims:
        claim_intact = True
        for x, y in product(
                range(claim.left_space, claim.left_space + claim.width),
                range(claim.top_space, claim.top_space + claim.height)):
            if fabric[x][y] != claim.claim_id:
                claim_intact = False
                break
        if claim_intact:
            print(claim.claim_id)

def count_overlapping_squares(claims):
    claimed_fabric = {}
    duplicated_squares = set()
    for claim_description in claims:
        claim = Claim(claim_description)
        for x in claim.get_x_range():
            for y in claim.get_y_range():
                coordinate = Coordinate(x, y).get_value()
                if coordinate in claimed_fabric:
                    claimed_fabric[coordinate] = 'X'
                    duplicated_squares.add(coordinate)
                else:
                    claimed_fabric[coordinate] = claim.id
    return len(duplicated_squares)

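# Example usage, assuming Claim's constructor parses the standard
# "#id @ x,y: WxH" description format used in the tests above. Claims #1
# and #2 overlap in a 2x2 region, so 4 square inches are counted.
assert count_overlapping_squares([
    "#1 @ 1,3: 4x4",
    "#2 @ 3,1: 4x4",
    "#3 @ 5,5: 2x2",
]) == 4
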
def part2_v3(file_name: str) -> str:
    """
    Find the one claim that doesn't overlap any other claim. Return its id.

    This version is similar to part2_v2(), but we handle the filtering of
    the claim IDs differently. In my haphazard benchmarking, it seems
    marginally slower.
    """
    with open(file_name) as f:
        claims = [Claim.new_from_string(line.rstrip()) for line in f]

    # Each value is a claim ID.
    set_claim_ids = set()

    # Each key is a sq inch tuple. Each value is a list of claim IDs.
    d_sq_inches: DefaultDict[Tuple[int, int], List[str]] = defaultdict(list)

    # set_claim_ids will end up with all claim IDs.
    # d_sq_inches will end up with all square inches that have claims on
    # them, and a list of the claim IDs that have the claims.
    for claim in claims:
        set_claim_ids.add(claim.id)
        for sq_inch in claim.sq_inches:
            d_sq_inches[sq_inch].append(claim.id)

    # Now for all square inches with more than one claim,
    # discard the claim IDs from set_claim_ids.
    for claim_list in d_sq_inches.values():
        if len(claim_list) > 1:
            for claim_id in claim_list:
                set_claim_ids.discard(claim_id)

    # The only claim ID left is the one we want.
    return set_claim_ids.pop()

def part2_v2(file_name: str) -> str:
    """
    Find the one claim that doesn't overlap any other claim. Return its id.

    This version is much faster than part2_v1().
    """
    with open(file_name) as f:
        claims = [Claim.new_from_string(line.rstrip()) for line in f]

    # Each key is a claim ID. Each value is a bool.
    d_claim_ids = {}

    # Each key is a sq inch tuple. Each value is a list of claim IDs.
    d_sq_inches: DefaultDict[Tuple[int, int], List[str]] = defaultdict(list)

    # Load both dictionaries. d_claim_ids will end up with all claim IDs.
    # d_sq_inches will end up with all square inches that have claims on
    # them, and a list of the claim IDs that have the claims.
    for claim in claims:
        d_claim_ids[claim.id] = True
        for sq_inch in claim.sq_inches:
            d_sq_inches[sq_inch].append(claim.id)

    # Now for all square inches with more than one claim,
    # flip the corresponding d_claim_ids entries to False.
    for claim_list in d_sq_inches.values():
        if len(claim_list) > 1:
            for claim_id in claim_list:
                d_claim_ids[claim_id] = False

    # Now return the one claim ID that's still True.
    for claim_id, b in d_claim_ids.items():
        if b:
            return claim_id
    raise Exception(NOT_FOUND)

def test_claim_new_from_string():
    s = "#14 @ 690,863: 12x20"
    claim = Claim.new_from_string(s)
    assert claim.id == "14"
    assert claim._x == 690
    assert claim._y == 863
    assert claim._width == 12
    assert claim._height == 20

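# A possible regex-based sketch of how Claim.new_from_string() could parse
# the "#id @ x,y: WxH" format the test above pins down; it would live on the
# Claim class. The regex and the constructor signature are assumptions, not
# this repo's actual implementation.
import re

_CLAIM_RE = re.compile(r"#(\d+) @ (\d+),(\d+): (\d+)x(\d+)")

@classmethod
def new_from_string(cls, s: str) -> "Claim":
    # Keep the id as a string; the geometry fields are integers.
    claim_id, x, y, width, height = _CLAIM_RE.fullmatch(s).groups()
    return cls(claim_id, int(x), int(y), int(width), int(height))
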
def create_new_claim_prompt():
    claim_type = input('Enter Claim Type: ')
    description = input('Enter claim description: ')
    claim_amount = input('Enter claim amount: $')
    date_of_accident = input('Enter date of accident (mm/dd/yy): ')
    date_of_claim = input('Enter date of claim (mm/dd/yy): ')
    new_claim = Claim(claim_type, description, claim_amount,
                      date_of_accident, date_of_claim)
    return new_claim

def test_add_claim_should_add_1_claim_to_list(self):
    # Arrange
    new_claim = Claim('Boat', 'Hijacked', 555, '02/01/18', '02/11/18')

    # Act (add the arranged claim, not the one already added in setUp)
    self.claim_repo.add_claim(new_claim)
    expected = 2
    actual = len(self.claim_repo.claims_list)

    # Assert
    self.assertEqual(expected, actual)

def __init__(self, url, claim):
    # root: Claim
    self.tree_root = Claim(url, claim, Tokenizer(claim))
    self.response_object = [{'citeID': self.tree_root.id,
                             'parentCiteID': 0,
                             'link': self.tree_root.href,
                             'score': self.tree_root.score,
                             'source': self.tree_root.text}]
    self.queue = q.PriorityQueue()
    self.queue.put(ClaimPath([self.tree_root], 1))
    self.beam_search(self.tree_root)

def part2_v1(file_name: str) -> str:
    """
    Find the one claim that doesn't overlap any other claim. Return its id.
    """
    with open(file_name) as f:
        claims = [Claim.new_from_string(line.rstrip()) for line in f]

    for claim in claims:
        if _no_overlap(claim, claims):
            return claim.id
    raise Exception(NOT_FOUND)

def find_fully_isolated_claim(claims):
    claimed_fabric = {}
    intersected_claims = set()
    all_claims = set()
    for claim_description in claims:
        claim = Claim(claim_description)
        for x in claim.get_x_range():
            for y in claim.get_y_range():
                coordinate = Coordinate(x, y).get_value()
                if coordinate in claimed_fabric:
                    claimed_fabric[coordinate].append(claim.id)
                    for claim_id in claimed_fabric[coordinate]:
                        intersected_claims.add(claim_id)
                else:
                    claimed_fabric[coordinate] = [claim.id]
        all_claims.add(claim.id)
    return (all_claims - intersected_claims).pop()

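# Example usage with the same three example claims used in the tests above;
# only claim #3 touches no other claim, so its id comes back (the exact type
# of .id depends on the Claim constructor, which isn't shown here).
isolated = find_fully_isolated_claim([
    "#1 @ 1,3: 4x4",
    "#2 @ 3,1: 4x4",
    "#3 @ 5,5: 2x2",
])
print(isolated)  # the id of claim #3
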
def part1(file_name: str) -> int:
    """
    How many square inches of fabric are within two or more claims?
    """
    with open(file_name) as f:
        claims = [Claim.new_from_string(line.rstrip()) for line in f]

    d: DefaultDict[Tuple[int, int], int] = defaultdict(int)

    # Count how many times each square inch appears in a claim.
    for claim in claims:
        for sq_inch in claim.sq_inches:
            d[sq_inch] += 1

    # Count the number of values in d that are > 1.
    return len([v for v in d.values() if v > 1])

def claim_get(id):
    logging.debug('----------------------------------------')
    key = request.values.get('key', None)
    data = dht.get_claim(id)
    if data:
        if key:
            claim = Claim.from_hex(data, key)  # FIXME: key needs decoding.
            return Response(claim.to_json(False),
                            mimetype='application/json')
        else:
            return Response(json.dumps({'data': data}),
                            mimetype='application/json')
    else:
        return abort(404)

def test_controller():
    test_set_claims = os.path.join(config['cwd'], 'testing_set',
                                   'claims.txt')
    with open(test_set_claims, 'r', errors='replace') as f_claim:
        claims = [line for line in f_claim]

    test_set_links = os.path.join(config['cwd'], 'testing_set', 'links.txt')
    with open(test_set_links, 'r', errors='replace') as f_links:
        links = [line for line in f_links]

    claim_objects = []
    for x in range(len(claims)):
        claim_objects.append(Claim(links[x].strip(), claims[x].strip(), 0))

    for claim in claim_objects:
        print(claim.get_full_claim())

def test_claim_sq_inches():
    c1 = Claim.new_from_string("#1 @ 1,3: 4x4")
    assert c1.sq_inches == [
        (1, 3), (1, 4), (1, 5), (1, 6),
        (2, 3), (2, 4), (2, 5), (2, 6),
        (3, 3), (3, 4), (3, 5), (3, 6),
        (4, 3), (4, 4), (4, 5), (4, 6),
    ]

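# A sketch of the sq_inches property the test above pins down: every square
# inch the claim covers, in x-major order. It would live on the Claim class;
# the attribute names follow test_claim_new_from_string(), but the property
# body itself is an assumption.
@property
def sq_inches(self):
    return [(x, y)
            for x in range(self._x, self._x + self._width)
            for y in range(self._y, self._y + self._height)]
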
def claim(self, ttl, grace, limit=5):
    """
    Claims a set of messages. The server configuration determines
    the maximum number of messages that can be claimed.
    """
    href = proc_template(self._claims_template, limit=str(limit))
    body = {"ttl": ttl, "grace": grace}

    hdrs, body = self._conn._perform_http(href=href, method='POST',
                                          request_body=body)

    # Build a list of Message objects using a list comprehension.
    msgs = [Message(self._conn, href=msg['href'], content=msg)
            for msg in body]

    location = hdrs['location']
    return Claim(conn=self._conn, messages=msgs, href=location)

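# Hypothetical usage of claim() above, assuming `queue` is the object that
# exposes it and that the returned Claim keeps the messages passed to its
# constructor: claim up to 5 messages for 300 seconds with a 60-second
# grace period.
my_claim = queue.claim(ttl=300, grace=60, limit=5)
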
def setupClaims(filename):
    claims = []
    with open(filename) as f:
        for line in f:
            sub = line.split(' ')
            idNbr = int(sub[0].split('#')[1])
            dim = sub[3].split('x')
            width = int(dim[0])
            height = int(dim[1])
            coord = sub[2].split(',')
            x = int(coord[0])
            y = int(coord[1][0:-1])
            c = Claim(idNbr, x, y, width, height)
            claims.append(c)
    return claims

def main():
    fabric_length = 1000
    fabric = np.zeros([fabric_length, fabric_length], dtype=int)
    duplicated_fabric = 0
    claims = []
    with open("input.txt") as file:
        claims_input = file.readlines()
    for claim_input in claims_input:
        split_claim = re.split(' @ |,|: |x', claim_input.strip())
        claim = Claim(split_claim[0], int(split_claim[1]), int(split_claim[2]),
                      int(split_claim[3]), int(split_claim[4]))
        claims.append(claim)
    for claim in claims:
        for x, y in product(
                range(claim.left_space, claim.left_space + claim.width),
                range(claim.top_space, claim.top_space + claim.height)):
            if fabric[x][y] == 1:
                duplicated_fabric += 1
                fabric[x][y] = 2
            elif fabric[x][y] == 0:
                fabric[x][y] = 1
    print(duplicated_fabric)

def get_input_array(input_file):
    claims = []
    for line in input_file:
        pieces = line.split()
        id_string = pieces[0].replace("#", "")
        claim_id = int(id_string)
        coords = pieces[-2].replace(":", "")
        xy = coords.split(",")
        x = int(xy[0])
        y = int(xy[1])
        size = pieces[-1].split("x")
        width = int(size[0])
        height = int(size[1])
        claims.append(Claim(claim_id, x, y, width, height))
    return claims

def extract_claim_and_review(self, parsed_claim_review_page: BeautifulSoup,
                             url: str) -> List[Claim]:
    """
    Extracts the claim and its review from the parsed page.
    """
    claim = Claim()
    claim.set_rating_value(
        self.extract_rating_value(parsed_claim_review_page))
    claim.set_alternate_name(
        FatabyyanoFactCheckingSiteExtractor.translate_rating_value(
            self.extract_rating_value(parsed_claim_review_page)))
    claim.set_source("fatabyyano")
    claim.set_author("fatabyyano")
    claim.setDatePublished(self.extract_date(parsed_claim_review_page))
    claim.set_claim(self.extract_claim(parsed_claim_review_page))
    claim.set_body(self.extract_review(parsed_claim_review_page))
    claim.set_refered_links(self.extract_links(parsed_claim_review_page))
    claim.set_title(self.extract_claim(parsed_claim_review_page))
    claim.set_date(self.extract_date(parsed_claim_review_page))
    claim.set_url(url)
    claim.set_tags(self.extract_tags(parsed_claim_review_page))
    return [claim]

def extract_claim_and_review(self, parsed_claim_review_page: BeautifulSoup,
                             url: str) -> List[Claim]:
    claim = Claim()
    self.claim = self.extract_claim(parsed_claim_review_page)
    self.review = self.extract_review(parsed_claim_review_page)
    rating_value = self.extract_rating_value(parsed_claim_review_page)
    claim.set_rating_value(rating_value)
    claim.set_alternate_name(self.translate_rating_value(rating_value))
    claim.set_source(self.extract_author(
        parsed_claim_review_page))  # author of the review
    claim.set_author(self.extract_claimed_by(
        parsed_claim_review_page))  # author of the claim?
    # claim.setDatePublished(self.extract_date(parsed_claim_review_page))
    # (publication date of the claim?)
    claim.set_claim(self.claim)
    claim.set_body(self.review)
    claim.set_refered_links(self.extract_links(parsed_claim_review_page))
    claim.set_title(self.extract_title(parsed_claim_review_page))
    # Publication date of the review.
    claim.set_date(self.extract_date(parsed_claim_review_page))
    claim.set_url(url)
    claim.set_tags(self.extract_tags(parsed_claim_review_page))

    # extract_entities returns two values.
    json_claim, json_body = self.extract_entities(self.claim, self.review)
    claim.set_claim_entities(json_claim)
    claim.set_body_entities(json_body)
    return [claim]

def create_claim(claimtype, description, claimamount, dateofincident,
                 dateofclaim, isvalid):
    new_claim = Claim(claimtype, description, claimamount, dateofincident,
                      dateofclaim, isvalid)
    claims.append(new_claim)

def readPDFFile(self):
    # text = self.getTextFromPDF(self._pdfFileName)
    # items = text.splitlines(keepends=False)
    # items = self.clean(items)
    items = ITEMS
    # for item in items:
    #     print(' \"%s\",' % item)

    lastNames = self.getLastNames(items)
    firstNames = self.getFirstNames(items)
    phns = self.getPHNs(items)
    genders = self.getGenders(items)
    birthdays = self.getBirthdays(items)
    acqDates = self.getAcqDates(items)
    refDrs = self.getRefDrs(items)

    count = len(lastNames)
    for n in lastNames:
        print("last name: '%s'" % n)
    if len(firstNames) != count:
        raise ValueError("Got %d first names; expected %d"
                         % (len(firstNames), count))
    if len(phns) != count:
        raise ValueError("Got %d PHNs; expected %d" % (len(phns), count))
    if len(genders) != count:
        raise ValueError("Got %d genders; expected %d"
                         % (len(genders), count))
    if len(birthdays) != count:
        raise ValueError("Got %d birthdays; expected %d"
                         % (len(birthdays), count))
    if len(acqDates) != count:
        raise ValueError("Got %d acqDates; expected %d"
                         % (len(acqDates), count))
    if len(refDrs) != count:
        raise ValueError("Got %d refDrs; expected %d"
                         % (len(refDrs), count))

    for i in range(count):
        claim = Claim()
        claim.setFirstName(firstNames[i])
        claim.setLastName(lastNames[i])
        claim.setPhn(phns[i])
        claim.setGender(genders[i])
        claim.setBirthday(birthdays[i])
        claim.setAcqDate(acqDates[i])
        claim.setRefDr(refDrs[i])
        self._claims.append(claim)

def claim(n):
    n = n[0]
    return Claim(n[0], n[1], n[2], n[3])

def get_claim(self):
    if self.triples_added or self.triples_removed:
        return Claim(self.triples_added, self.triples_removed, self.links)
    return None

def create_claim(claim_type, description, amount, accident_date, claim_date,
                 is_valid):
    new_claim = Claim(claim_type, description, amount, accident_date,
                      claim_date, is_valid)
    claim_list.append(new_claim)

def setUp(self):
    self.claim_repo = ClaimRepository([])
    self.claim = Claim('Car', 'Accident', 350, '01/01/18', '02/11/18')
    self.claim_repo.add_claim(self.claim)

def menu_option_logic(option_number):
    if option_number == "1":
        print('\nSee all claims')
        claim_repo.view_all_claims()
    elif option_number == "2":
        print('\nTake care of next Claim')
        take_care_of_claim_prompt()
    elif option_number == "3":
        print('\nEnter a new Claim')
        new_claim = create_new_claim_prompt()
        claim_repo.add_claim(new_claim)
    else:
        print('\nExiting out of application')
        exit()


if __name__ == "__main__":
    claim = Claim('Car', 'Accident', 350, '01/01/18', '02/01/18')
    claim2 = Claim('Boat', 'Hijacking', 450, '02/01/18', '02/01/18')
    claim_repo.add_claim(claim)
    claim_repo.add_claim(claim2)

    while True:
        user_input = print_menu_options_and_get_input()
        menu_option_logic(user_input)
        # break

    # Start of test (unreachable unless the break above is uncommented).
    claim = Claim('Car', 'Accident', 350, '01/01/18', '02/01/18')
    print(f'{claim.date_of_accident.date()} - {claim.date_of_claim.date()}')
    time_between_claim_and_accident = (claim.date_of_accident.date()
                                       - claim.date_of_claim.date())
    print(type(time_between_claim_and_accident))
    print(time_between_claim_and_accident)
    print(abs(time_between_claim_and_accident.days))