class TestResolver(unittest.TestCase):
    """Test class for Resolver.

    NOTE(review): these tests hit the live DNS network (eur.al,
    anytask.urgu.org, a root server) — they are integration tests and
    will fail offline.
    """

    def setUp(self):
        # A fresh Resolver is built for every test, so state changed by
        # one test (e.g. the port) never leaks into another.
        self.resolver = Resolver()

    def testARecords(self):
        """An A-record lookup returns the known address list."""
        reference_a_record = [(1, '31.170.165.34')]
        test_a_record = self.resolver.resolve('eur.al')
        self.assertEqual(reference_a_record, test_a_record)

    def testNoResponse(self):
        """Pointing at a dead port yields the NO_RESPONSE sentinel."""
        reference_no_response = Resolver.NO_RESPONSE
        self.resolver.port = 65000
        test = self.resolver.resolve('eur.al')
        self.assertEqual(reference_no_response, test)
        # FIX: the old trailing ``self.resolver.port = 53`` was dead code:
        # setUp() recreates the resolver before every test, and the reset
        # was skipped anyway whenever the assertion above failed.

    def testNotFound(self):
        """A nonexistent name yields the NAME_NOT_FOUND sentinel."""
        reference = Resolver.NAME_NOT_FOUND
        test = self.resolver.resolve('opsidjgsdkjf.paris')
        self.assertEqual(reference, test)

    def testCNameRecord(self):
        """The first answer for a CNAME'd host is the (5, target) tuple."""
        reference = 5, 'dijkstra.urgu.org'
        test = self.resolver.resolve('anytask.urgu.org')
        self.assertEqual(reference, test[0])

    def testRecursion(self):
        """Resolution starting from a root server ('198.41.0.4') works."""
        self.resolver = Resolver('198.41.0.4')
        reference_a_record = [(1, '31.170.165.34')]
        test_a_record = self.resolver.resolve('eur.al')
        self.assertEqual(reference_a_record, test_a_record)
class Recognizer:
    """Background worker that harvests faces from movement frames.

    Frames are pulled from ``Store.movement_frames``; detected faces are
    accumulated until the queue goes quiet, then clustered, deduplicated
    and handed to the resolver.
    """

    def __init__(self):
        self.resolver = Resolver()
        # FIX: Thread.start() returns None, so the original
        # ``self.thread = Thread(...).start()`` discarded the thread
        # handle. Keep the Thread object, then start it.
        self.thread = Thread(target=self.run, args=())
        self.thread.start()

    def run(self):
        """Consume frames forever; flush collected faces on queue timeout."""
        faces = []
        while True:
            try:
                # Block up to 2 s for the next frame; queue.Empty below
                # doubles as the "burst of movement has ended" signal.
                frame = Store.movement_frames.get(True, 2)
                face_locations = face_recognition.face_locations(frame)
                face_encodings = face_recognition.face_encodings(
                    frame, face_locations)
                for (top, right, bottom, left), encoding in zip(
                        face_locations, face_encodings):
                    # (The original also computed a padded crop here but
                    # never used it; that dead local has been removed.)
                    faces.append({
                        "img": frame[top:bottom, left:right],
                        "encoding": encoding
                    })
            except queue.Empty:
                if len(faces) > 0:
                    self.resolver.resolve(self.preprocess_faces(faces).copy())
                    faces = []

    def preprocess_faces(self, faces):
        """Cluster faces and keep one representative per person.

        @param faces: list of {"img": ..., "encoding": ...} dicts
        @returns: one face dict per cluster — the face whose encoding is
            closest to its cluster's centroid
        """
        # Cluster the faces with chinese whispers
        encodings = [dlib.vector(face['encoding']) for face in faces]
        labels = dlib.chinese_whispers_clustering(encodings, 0.5)
        selected_faces = []
        # Select face most close to average group
        groups = list(set(labels))
        for group in groups:
            # Get indices for each group
            indices = [i for i in range(len(labels)) if labels[i] == group]
            group_encodings = [faces[i]['encoding'] for i in indices]
            # Get centroid for group encodings
            avg_group_encoding = np.average(group_encodings, axis=0)
            # Get the closest face to the centroid
            avg_distance = face_recognition.face_distance(
                group_encodings, avg_group_encoding)
            min_index = np.argmin(avg_distance)
            face_index = indices[min_index]
            selected_faces.append(faces[face_index])
        return selected_faces
def fmt_serializer(node, fields):
    """Format a serializer class as ``Name(Bases)`` plus a field table.

    @param node: class node with ``name`` and ``bases`` attributes
    @param fields: field rows rendered by tabulate (headers from keys)
    @returns: formatted string ending in a newline
    """
    base_names = ', '.join(Resolver.resolve(base) for base in node.bases)
    field_table = tabulate(fields, headers="keys", tablefmt='grid')
    return '{}({})\n{}\n'.format(node.name, base_names, field_table)
def dynamic_fields(self):
    """Map serializer names to the filter options their views declare.

    Built lazily on first access and memoized in
    ``self._dynamic_field_map``; only views with at least one truthy
    filter/serializer option are recorded.
    """
    if self._dynamic_field_map:
        return self._dynamic_field_map
    for view_name, view_node in self.view_registry.nodes.iteritems():
        options = {
            'include_filters': None,
            'expand_filters': None,
            'exclude_filters': None,
            'serializer_class': None
        }
        for var_node in view_node.body:
            if not self.is_class_var(var_node):
                continue
            name, value = Resolver.resolve(var_node)
            if name in options:
                options[name] = self.resolve_view_var(value)
        serializer = options.pop('serializer_class')
        # Drop options that were never set (or resolved to a falsy value).
        filled = {key: val for key, val in options.iteritems() if val}
        if filled:
            self._dynamic_field_map[serializer] = filled
    return self._dynamic_field_map
def find_serializer_fields(self, serializer_name):
    """Collect the effective set of DRF fields for *serializer_name*.

    Precedence (highest first): the class's own class variables, its
    Meta fields, base classes left-to-right (a base's own bases trump
    its right-hand neighbour), and finally dynamic fields injected via
    ``__init__``, which trump or augment everything. Results are
    memoized in ``self.memo_dict``.
    """
    nodes = self.serializer_registry.nodes
    # Memoization: each serializer is analysed at most once, which also
    # keeps the base-class recursion below cheap.
    if serializer_name in self.memo_dict:
        return self.memo_dict[serializer_name]
    class_node = nodes[serializer_name]
    fields = Fields()
    init_node = None
    # Look at own class variables first, this trumps everything else
    for node in class_node.body:
        if self.is_class_var(node):
            # explicit class var trumps Meta
            fields.add(Resolver.drf_field_assignment(node), overwrite=True)
        elif self.is_meta(node):
            fields.extend(Resolver.drf_meta_fields(node))
        elif self.is_init_method(node):
            # Remember __init__ for the dynamic-field pass below.
            init_node = node
    # add fields from bases, in left to right order. The bases of the base
    # trumps the neighbour of the base if there's overlap.
    for base in class_node.bases:
        base = Resolver.resolve(base)
        if base == 'object':
            continue
        if base not in nodes:
            # TODO: ??? (base defined outside the registry — skipped)
            continue
        base_class_vars = self.find_serializer_fields(base)
        fields.extend(base_class_vars)
    # Check for dynamic fields that were inherited from direct ancestors.
    # TODO: Find a better way to support inheritance
    # NOTE(review): base nodes apparently expose either ``attr`` or
    # ``id`` depending on AST shape — confirm against Resolver.resolve.
    parent_in_dynamic_fields = any(
        getattr(parent_class, 'attr', None) in self.dynamic_fields or
        getattr(parent_class, 'id', None) in self.dynamic_fields
        for parent_class in class_node.bases)
    # dynamic fields trump or augment existing fields
    if serializer_name in self.dynamic_fields or parent_in_dynamic_fields:
        if init_node:
            dynamic_fields = Resolver.init_method(init_node)
            for field_name, field in dynamic_fields.iteritems():
                if field_name not in fields:
                    fields.add(field)
                    continue
                # Field already known: merge the dynamic definition into
                # the previously collected one.
                previous_field = fields[field_name]
                augmented_field = self.augment_field(previous_field, field)
                fields.add(augmented_field, overwrite=True)
    self.memo_dict[serializer_name] = fields
    return fields
def run(self, source):
    """Scan, parse, resolve and interpret *source*.

    Aborts silently after any phase that reported an error via the
    ``hadError`` / ``had_runtime_error`` flags on this object.
    """
    tokens = Scanner(source, self).scan_tokens()
    statements = Parser(self, tokens).parse()
    # Don't resolve if scanning/parsing failed or a runtime error is pending.
    if self.hadError or self.had_runtime_error:
        return
    Resolver(self.interpreter, self).resolve(statements)
    # Resolution itself may flag errors (e.g. invalid variable usage).
    if self.hadError:
        return
    self.interpreter.interpret(statements)
def find_serializer_fields(self, serializer_name):
    """Collect the effective set of DRF fields for *serializer_name*.

    Precedence (highest first): own class variables, Meta fields, base
    classes left-to-right, then dynamic fields from ``__init__`` which
    trump or augment everything. Results are memoized in
    ``self.memo_dict``.

    @raises Exception: when the view declares dynamic fields but the
        serializer has no ``__init__`` to supply them.
    """
    nodes = self.serializer_registry.nodes
    if serializer_name in self.memo_dict:
        return self.memo_dict[serializer_name]
    class_node = nodes[serializer_name]
    fields = Fields()
    init_node = None
    # Look at own class variables first, this trumps everything else
    for node in class_node.body:
        if self.is_class_var(node):
            # explicit class var trumps Meta
            fields.add(Resolver.class_var_drf_field(node), overwrite=True)
        elif self.is_meta(node):
            fields.extend(Resolver.drf_meta_fields(node))
        elif self.is_init_method(node):
            init_node = node
    # add fields from bases, in left to right order. The bases of the base
    # trumps the neighbour of the base if there's overlap.
    for base in class_node.bases:
        base = Resolver.resolve(base)
        if base == 'object':
            continue
        if base not in nodes:
            # TODO: ??? (base defined outside the registry — skipped)
            continue
        base_class_vars = self.find_serializer_fields(base)
        fields.extend(base_class_vars)
    # dynamic fields trump or augment existing fields
    if serializer_name in self.dynamic_fields:
        if not init_node:
            msg = ('Did not find __init__ in {} but view specifies dynamic'
                   ' fields.').format(serializer_name)
            raise Exception(msg)
        # BUG FIX: the original passed ``node`` — the stale loop variable
        # from the class-body scan above — instead of the remembered
        # ``init_node``, so the wrong AST node was analysed whenever
        # __init__ was not the last statement in the class body.
        dynamic_fields = Resolver.init_method(init_node)
        # NOTE(review): this loop assumes each dynamic field is a dict
        # carrying 'field_name' — confirm against Resolver.init_method.
        for field in dynamic_fields:
            if field not in fields:
                fields.add(field)
                continue
            previous_field = fields[field['field_name']]
            augmented_field = self.augment_field(previous_field, field)
            fields.add(augmented_field, overwrite=True)
    self.memo_dict[serializer_name] = fields
    return fields
def cli():
    """Interactive entry point: read an image path, analyse the board,
    run the tic-tac-toe resolver and report the result."""
    image_path = input("Введите путь до изображения:\n")
    analyzer = ImageAnalyzer()
    analyzer.read_image(image_path)
    print("{} прочитано".format(image_path))
    analyzer.process_xo()
    board = analyzer.get_map()
    print("Расчет анализатора:\n {}".format(board))
    print("0 - пустота, 1 - Х, 2 - О")
    answer = Resolver(board).resolve()
    print("Ответ алгоритма: {}".format(answer))
    if analyzer.process_output(answer):
        print("Результат в файле output.pnt")
    print("Done")
def main():
    """CLI entry point: parse arguments, resolve one name, print records."""
    arg_parser = argparse.ArgumentParser(description='YOBAdns-resolver')
    arg_parser.add_argument(
        "address", metavar="Address", nargs="+", type=str,
        help='Address that you need to resolve')
    arg_parser.add_argument(
        "--server", "-s", metavar="Server", nargs="?", type=str,
        default=DEFAULT_SERVER,
        help='Set the DNS server to first request, default: 8.8.8.8')
    arg_parser.add_argument(
        "--port", "-p", metavar="Port", nargs="?", type=int,
        default=DEFAULT_PORT,
        help='Set the port of dedicated DNS server')
    arg_parser.add_argument(
        "-d", "--debug", action="store_true",
        help="switch on debug mode")
    arg_parser.add_argument(
        "-n", "--num", nargs="?", type=int,
        default=DEFAULT_NUM_OF_RETRIES,
        help="number of retries")
    arg_parser.add_argument(
        "-w", "--waiting", nargs="?", type=int,
        default=DEFAULT_TIMEOUT,
        help="waiting time of request")
    options = arg_parser.parse_args()

    dns = Resolver(options.server, options.debug, options.port,
                   options.num, options.waiting)
    answer = dns.resolve(options.address[0])
    # Only the first address is resolved; sentinel values signal failure.
    if answer == dns.NO_RESPONSE:
        print('\tNo response')
    elif answer == dns.NAME_NOT_FOUND:
        print('\t\tName {} does not exist'.format(options.address[0]))
    else:
        print('\tDomain: ' + options.address[0])
        print('\tResponses:')
        for record in answer:
            print('\t\t{}\t{}'.format(dns.TYPES[record[0]], record[1]))
class MonitoredResource(object):
    """
    Monitored resource (URL). The resource is generally any document in any
    format, but most often it will be HTML code. This class wraps the URL
    content and metadata. The contents can be manipulated within the time
    so it can provide information about how the content changed in
    different versions of the document.

    Warning: Application developers should generally not need to
    instantiate this class directly. The only correct way how to get this
    object is through Monitor.get() method.

    Design pattern: Active Record

    Example of checking new version of document:
    >>> from rrslib.web.changemonitor import Monitor
    >>> monitor = Monitor(user_id="myuid")
    >>> monitor
    Monitor(conn=Connection('localhost', 27017), dbname='webarchive', uid='myuid')
    >>> resource = monitor.get("http://www.myusefulpage.com/index.html")
    >>> resource
    <MonitoredResource(url='http://www.myusefulpage.com/index.html', uid='myuid') at 0xb7398accL>
    >>> resource.check()
    True
    >>> # the resource has changed

    Checking availability of the document on the URL
    >>> from rrslib.web.changemonitor import HTTPDateTime
    >>> resource = monitor.get("http://www.nonexistentpage.com")
    >>> resource.available()
    False
    >>> resource = monitor.get("http://www.myusefulpage.com/index.html")
    >>> resource.available(HTTPDateTime(2012, 6, 30, 15, 34))
    True

    Example of getting last available version of the document on the URL
    >>> resource = monitor.get("http://www.myusefulpage.com/index.html")
    >>> content = resource.get_last_version()
    >>> print content.data
    <html><head>
    ...
    >>> resource = monitor.get("http://www.crazynonexistentpage.com")
    >>> content = resource.get_last_version()
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
    DocumentNotAvailable: The content of this URL is not available and it
    is not in the storage.

    Example of getting version of the document in exact time
    >>> resource = monitor.get("http://www.myusefulpage.com/index.html")
    >>> content = resource.get_version(HTTPDateTime(2012, 6, 30, 15, 34))

    Getting the last time when the document was checked:
    >>> resource = monitor.get("http://www.crazynotexistentpage.com")
    >>> resource.last_checked()
    HTTPDateTime(Thu, 01 Jan 1970 00:00:00 GMT)
    """

    def __init__(self, url, uid, storage):
        """
        @param url: monitored URL
        @type url: basestring (str or unicode)
        @param uid: user identifier. This ID has to be unique for each one,
            who is using changemonitor.
        @type uid: str
        @param storage: storage of monitored-resource data
        @type storage: model.Storage
        """
        # resource data
        self.url = url
        self.uid = uid
        # models
        self.storage = storage
        self.headers = storage._headermeta
        # resolver
        self.resolver = Resolver(storage)
        # file
        try:
            self.file = self.storage.get(url)
        except DocumentNotAvailable:
            # if the file is not in the storage, resolver has to check
            # the url and load actual data into the storage
            self.resolver.resolve(url)
            try:
                self.file = self.storage.get(url)
            except DocumentNotAvailable:
                raise DocumentNotAvailable("Resource '%s' is not available." % url)

    def check(self):
        """
        Check the resource URL and load the most recent version into
        database.

        TODO: consider using @lazy decorator. Most of use cases use this
        method so we have to insure that it will be called only once.

        @raises: DocumentTooLargeException
        @returns: True if the document has changed since last check.
        """
        # Will use the resolver to check the URL and learn whether the
        # document changed. It may also be worth loading the newest
        # version straight into self.file, but that needs more thought.
        # The file cache definitely has to be refreshed before each check:
        # self.file.refresh_cache()
        #self.resolver.resolve(self.url)
        raise NotImplementedError()

    def get_last_version(self):
        """
        Get last available content of the document.

        If the document is available at this time, returns most recent
        version which is on the web server.

        @returns: Last available content of this resource.
        @rtype: Content
        @raises: DocumentNotAvailable if no content available (resource
            does not exist on the URL and never existed within the known
            history)
        """
        self.resolver.resolve(self.url)
        try:
            return self.file.get_last_version()
        except NoFile:
            # FIXME this should not be reachable here??!
            raise DocumentNotAvailable("Resource is not available.")

    def get_version(self, time_or_version):
        """
        Get content of this document in specified time or version.

        If the document was not available in given time, returns last
        available content. If there is no available content until given
        time, raises exception.

        @param time_or_version: Time or version of the content we want to
            retrieve. Version numbering is a convenience atop the GridFS
            API provided by MongoDB. version ``-1`` will be the most
            recently uploaded matching file, ``-2`` the second most
            recently uploaded, etc. Version ``0`` will be the first version
            uploaded, ``1`` the second version, etc. So if three versions
            have been uploaded, then version ``0`` is the same as version
            ``-3``, version ``1`` is the same as version ``-2``, and
            version ``2`` is the same as version ``-1``.
        @type time_or_version: HTTPDateTime or int
        @raises: DocumentHistoryNotAvailable if there is no available
            content until given time or version
        """
        if isinstance(time_or_version, HTTPDateTime):
            return self.file.get_version(time_or_version.to_timestamp())
        elif isinstance(time_or_version, int):
            return self.file.get_version(time_or_version)
        else:
            raise TypeError("Version time has to be type HTTPDateTime or GridFS version (int).")

    def get_diff(self, start, end):
        """
        @param start: start time or version to be diffed
        @type start: HTTPDateTime or int
        @param end: end time or version to be diffed
        @type end: HTTPDateTime or int
        @returns: either textual or binary diff of the file (if available).
            If contents are equal (document did not change within this
            time range) returns None.
        @rtype: unicode
        @raises: DocumentHistoryNotAvaliable if the storage doesn't provide
            enough data for computing the diff.
        """
        content_start = self.get_version(start)
        content_end = self.get_version(end)
        if content_start == content_end:
            return None
        return content_start.diff_to(content_end)

    def available(self, httptime=None):
        # FIX: the original raised TypeError whenever httptime was not an
        # HTTPDateTime — including the documented default None, making the
        # no-argument call (see class docstring) impossible. None now
        # passes through, meaning "availability right now".
        if httptime is not None and not isinstance(httptime, HTTPDateTime):
            raise TypeError("Time of availability has to be type HTTPDateTime.")
        # If httptime is None, this asks about availability at this moment
        raise NotImplementedError()

    def last_checked(self):
        """
        Get information about the time of last check of this resource.

        @returns: time of last check or None if the resource was never
            checked (or the HTTP requests timed out)
        @rtype: HTTPDateTime or None
        """
        return self.headers.last_checked(self.url)

    def __repr__(self):
        return "<MonitoredResource(url='%s', uid='%s') at %s>" % \
            (self.url, self.uid, hex(id(self)))

    __str__ = __repr__
def resolve_view_var(self, node):
    """Resolve the value of a view-level class variable via Resolver.

    The original wrapped the call in ``try/except AttributeError: raise``,
    which is a no-op — the exception propagates to the caller unchanged
    either way — so the wrapper has been removed.
    """
    return Resolver.resolve(node)
class MonitoredResource(object):
    """
    Monitored resource (URL). The ressource is generally any document in
    any format, but most often it will be HTML code. This class wraps the
    URL content and metadata. The contents can be manipulated within the
    time so it can provide information about how the content changed in
    different versions of the document.

    Warning: Application developers should generally not need to
    instantiate this class directly. The only correct way how to get this
    object is through Monitor.get() method.

    Design pattern: Active Record

    Example of checking new version of document:
    >>> from rrslib.web.changemonitor import Monitor
    >>> monitor = Monitor(user_id="myuid")
    >>> monitor
    Monitor(conn=Connection('localhost', 27017), dbname='webarchive', uid='myuid')
    >>> resource = monitor.get("http://www.myusefulpage.com/index.html")
    >>> resource
    <MonitoredResource(url='http://www.myusefulpage.com/index.html', uid='myuid') at 0xb7398accL>
    >>> resource.check()
    True
    >>> # the resource has changed

    Checking availability of the document on the URL
    >>> from rrslib.web.changemonitor import HTTPDateTime
    >>> resource = monitor.get("http://www.nonexistentpage.com")
    >>> resource.available()
    False
    >>> resource = monitor.get("http://www.myusefulpage.com/index.html")
    >>> resource.available(HTTPDateTime(2012, 6, 30, 15, 34))
    True

    Example of getting last available version of the document on the URL
    >>> resource = monitor.get("http://www.myusefulpage.com/index.html")
    >>> content = resource.get_last_version()
    >>> print content.data
    <html><head>
    ...
    >>> resource = monitor.get("http://www.crazynonexistentpage.com")
    >>> content = resource.get_last_version()
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
    DocumentNotAvailable: The content of this URL is not available and it
    is not in the storage.

    Example of getting version of the document in exact time
    >>> resource = monitor.get("http://www.myusefulpage.com/index.html")
    >>> content = resource.get_version(HTTPDateTime(2012, 6, 30, 15, 34))

    Getting the last time when the document was checked:
    >>> resource = monitor.get("http://www.crazynotexistentpage.com")
    >>> resource.last_checked()
    HTTPDateTime(Thu, 01 Jan 1970 00:00:00 GMT)
    """

    def __init__(self, url, uid, storage):
        """
        @param url: monitored URL
        @type url: basestring (str or unicode)
        @param uid: user identifier. This ID has to be unique for each one,
            who is using changemonitor.
        @type uid: str
        @param storage: storage of monitored-resource data
        @type storage: model.Storage
        """
        # resource data
        self.url = url
        self.uid = uid
        # models
        self.storage = storage
        self.headers = storage._headermeta
        # resolver
        self.resolver = Resolver(storage)
        # True once the resolver has fetched the live URL at least once;
        # lets check() skip re-downloading unless force=True.
        self._checked = False
        # file
        try:
            self.file = self.storage.get(url)
        except DocumentNotAvailable:
            # if the file is not in the storage, resolver has to check
            # the url and load actual data into the storage
            self.resolver.resolve(url)
            self._checked = True
            try:
                self.file = self.storage.get(url)
            except DocumentNotAvailable:
                raise DocumentNotAvailable("Resource '%s' is not available." % url)

    def check(self, force=False):
        """
        Check the resource URL and load the most recent version into
        database.

        TODO: consider using @lazy decorator. Most of use cases use this
        method so we have to insure that it will be called only once.

        @param force: force use of resolver (no effect on first call), if
            force=False, doesn't try to download new content if called
            more than once
        @type force: Bool
        @raises: DocumentTooLargeException
        @raises: DocumentNotAvailable
        @returns: True if the document has changed since last check.
        """
        # Uses the resolver to check the URL and learn whether the document
        # changed; the newest version ends up in storage. The file cache
        # has to be refreshed before every check.
        self.file.refresh_cache()
        if force:
            self._checked = False
        if not self._checked:
            # use resolver to get most recent version if not yet checked
            self.resolver.resolve(self.url)
            self._checked = True
            try:
                self.file = self.storage.get(self.url)
            except DocumentNotAvailable:
                raise DocumentNotAvailable("Resource '%s' is not available." % self.url)
        # and determine return value
        try:
            # time of last check
            _now = self.headers.get_by_version(self.url,-1,True)['timestamp']
            _now = HTTPDateTime().from_timestamp(_now+1)
            # header and content are saved at the same time
            # we need to find content inserted before _now, that's why (_now+1)
        except Exception:
            raise DocumentNotAvailable("Check failed. Cannot get header information of '%s'." % self.url)
        try:
            # time of previous check
            _prev = self.headers.get_by_version(self.url,-2,True)
            _prev = HTTPDateTime().from_timestamp(_prev['timestamp']+1)
        except TypeError:
            # this is first time document is checked (no version -2 header;
            # get_by_version presumably returned None — TypeError on [] )
            if self._checked:
                return False # if already checked, and have no v=-2 header, there was no change
            else:
                return True # this is the first-time check
        #DEBUG
        #? print "header time: _now: ",_now,"\nheader time: _prev: ",_prev,"\n"
        d = self.get_diff(_prev,_now)
        if d is None:
            return False
        if isinstance(d, basestring):
            # PlainTextDiff, maybe htmldiff in some cases
            if len(d)==0:
                return False
            else:
                return True
        if isinstance(d, dict):
            # BinaryDiff
            if string.find(d['metainfo'],"(patch data)")==-1 :
                # TODO: find what indicates that BinaryDiff-ed file hasn't changed
                # current version seems to work, but needs more testing
                return False
            else:
                return True
        try:
            # d is htmldiff output (generator object)
            chunk = d.next()
            if len(chunk.added)==0 and len(chunk.removed)==0:
                return False
        except (StopIteration, TypeError):
            # if can't get d.next(), then d is probably empty -> no change
            return False
        return True

    def get_last_version(self):
        """
        Get last available content of the document.

        If the document is available at this time, returns most recent
        version which is on the web server.

        @returns: Last available content of this resource.
        @rtype: Content
        @raises: DocumentNotAvailable if no content available (resource
            does not exist on the URL and never existed within the known
            history)
        """
        self.resolver.resolve(self.url)
        self._checked = True
        try:
            self.file = self.storage.get(self.url)
            return self.file.get_last_version()
        except NoFile:
            # FIXME this should not be reachable here??!
            raise DocumentNotAvailable("Resource '%s' is not available." % self.url)

    def get_version(self, time_or_version):
        """
        Get content of this document in specified time or version.

        If the document was not available in given time, returns last
        available content. If there is no available content until given
        time, raises exception.

        @param time_or_version: Time or version of the content we want to
            retrieve. Version numbering is a convenience atop the GridFS
            API provided by MongoDB. version ``-1`` will be the most
            recently uploaded matching file, ``-2`` the second most
            recently uploaded, etc. Version ``0`` will be the first version
            uploaded, ``1`` the second version, etc. So if three versions
            have been uploaded, then version ``0`` is the same as version
            ``-3``, version ``1`` is the same as version ``-2``, and
            version ``2`` is the same as version ``-1``.
        @type time_or_version: HTTPDateTime or int
        @raises: DocumentHistoryNotAvailable if there is no available
            content until given time or version
        """
        if isinstance(time_or_version, HTTPDateTime):
            return self.file.get_version(time_or_version.to_timestamp())
        elif isinstance(time_or_version, int):
            return self.file.get_version(time_or_version)
        else:
            raise TypeError("Version time has to be type HTTPDateTime or GridFS version (int).")

    def get_diff(self, start, end):
        """
        @param start: start time or version to be diffed
        @type start: HTTPDateTime or int
        @param end: end time or version to be diffed
        @type end: HTTPDateTime or int
        @returns: either textual or binary diff of the file (if available).
            If contents are equal (document did not change within this
            time range) returns None.
        @rtype: unicode
        @raises: DocumentHistoryNotAvaliable if the storage doesn't provide
            enough data for computing the diff.
        """
        content_start = self.get_version(start)
        content_end = self.get_version(end)
        if content_start == content_end:
            return None
        return content_start.diff_to(content_end)

    def available(self, httptime=None):
        # httptime=None means "is the resource available right now?";
        # otherwise answer for the given historical moment.
        if (not isinstance(httptime, HTTPDateTime)) and (httptime is not None):
            raise TypeError("Time of availability has to be type HTTPDateTime.")
        # If httptime is None, availability at this very moment is checked
        if (httptime is None):
            try:
                self.check(force=True)
            except DocumentNotAvailable:
                return False
            return True
        else:
            # when was last checked before 'httptime'
            h = self.headers.get_by_time(self.url,httptime.to_timestamp())
            if h is None:
                return False # not checked before time 'httptime'
            t1 = h['timestamp']
            try:
                # NOTE(review): t2 is assigned but unused — the lookup is
                # done only for its DocumentHistoryNotAvaliable side effect.
                t2 = self.get_version(HTTPDateTime().from_timestamp(t1)).upload_date
            except DocumentHistoryNotAvaliable:
                return False
            return True

    def last_checked(self):
        """
        Get information about the time of last check of this resource.

        @returns: time of last check or None if the resource was never
            checked (or the HTTP requests timed out)
        @rtype: HTTPDateTime or None
        """
        return self.headers.last_checked(self.url)

    def __repr__(self):
        return "<MonitoredResource(url='%s', uid='%s') at %s>" % \
            (self.url, self.uid, hex(id(self)))

    __str__ = __repr__