Example 1
    def authenticate(self, request):
        header = self.get_header(request)
        if header is None:
            return None
        raw_token = self.get_raw_token(header)
        if raw_token is None:
            return None
        validated_token = self.get_validated_token(raw_token)
        if validated_token is None:
            return None
        try:
            user_id = validated_token[api_settings.USER_ID_CLAIM]
        except KeyError:
            return None
        try:
            user = User.objects.get(**{api_settings.USER_ID_FIELD: user_id})
        except User.DoesNotExist:
            return None

        if not user.is_active:
            return None

        # enforce a single active session: the presented token must match
        # the one cached for this user at login
        cache_token = Cache.get(user.username)
        if cache_token is None or str(
                cache_token).strip() != raw_token.decode().strip():
            return None
        return self.get_user(validated_token)
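
For context, a minimal sketch of the login-side counterpart this authenticate method implies: something has to store the issued token in the cache under the username for the comparison above to ever succeed. The helper below assumes rest_framework_simplejwt's RefreshToken; the function itself and the single-token-per-user policy are assumptions, not part of the original example.

    # Hypothetical login-side counterpart (not from the original source).
    # Stores the freshly issued access token so authenticate() above can
    # compare the presented token against it.
    from rest_framework_simplejwt.tokens import RefreshToken

    def issue_and_cache_token(user):
        token = str(RefreshToken.for_user(user).access_token)
        Cache.set(user.username, token)  # one active token per user (assumed)
        return token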
Example 2
    def post(self, request, *args, **kwargs):
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        header = request.META.get('HTTP_AUTHORIZATION')
        if isinstance(header, str):
            header = header.encode(HTTP_HEADER_ENCODING)
        try:
            tag, token = header.split()
            cache_token = Cache.get(serializer.data["username"])
            if cache_token is not None and str(
                    cache_token).strip() == token.decode().strip():
                # drop the cached token so this session cannot be reused
                Cache.delete(serializer.data["username"])
                try:
                    user_obj = User.objects.get(
                        username=serializer.data["username"])
                    models.Log(username=user_obj.username,
                               event=0,
                               content="Logout successful!").save()
                except User.DoesNotExist:
                    logs.print(
                        "error",
                        "{user} does not exist; cannot record the logout "
                        "log".format(user=serializer.data["username"]))
        except ValueError:
            # Authorization header did not split into two parts; nothing to do
            pass
        except AttributeError:
            # no Authorization header present at all
            return Response(status=status.HTTP_400_BAD_REQUEST)
        return Response({"code": 200, "msg": "Logout successful!"},
                        status=status.HTTP_200_OK)
Example 3
    async def get_from_backend(address_components):
        cache = Cache(TAMULookup.name)
        result = cache.get(address_components)
        is_cached = bool(result)
        if is_cached:
            request_count_cached.labels(TAMULookup.name).inc()
        else:
            result = tamu_geocode_address(address_components)
            cache.save(address_components,
                       result)  # TODO do this in the background
        request_count.labels(TAMULookup.name).inc()

        point = Point(
            (Decimal(result["Longitude"]), Decimal(result["Latitude"])))
        feature = Feature(
            geometry=point,
            properties={
                "service": TAMULookup.name,
                "quality": result["NAACCRGISCoordinateQualityCode"],
                "timestamp":
                datetime.datetime.utcnow().isoformat() + "Z",  # UTC, explicit Z
                "cached": is_cached,  # should this be a timestamp?
            },
        )

        return feature
Example 4
def cache_test():
    key = request.args.get("key")
    Cache.set(key, "val")
    Cache.set("name", key)
    name = Cache.get("name")
    Cache.delete("name")
    Cache.set("age", 12)
    return jsonify({"name": name})
Example 5
	def __init__(self):
		cache = Cache()
		status = cache.key_status("/stats")
		if status is None:
			self.update(cache)
		else:
			if status == Cache.STALE:
				t = threading.Thread(target=self.update)
				t.start()
			self.distro_count, self.package_count, self.upstream_count, self.release_count = cache.get("/stats")
Example 6
	def __init__(self,branch="current"):
		self.branch = branch
		cache = Cache()
		status = cache.key_status("/distro_ranks/"+self.branch)
		if status is None:
			self.update(cache)
		else:
			if status == Cache.STALE:
				t = threading.Thread(target=self.update)
				t.start()
			self.distros, = cache.get("/distro_ranks/"+self.branch)
Example 7
	def __init__(self,package=None,distro=None):
		c = Cache()
		self.releases = []
		
		con = db.connect(host=HOST,user=USER,password=PASSWORD,database=DB)
		cur = con.cursor()
		
		if package is not None:
			cur.execute("SELECT id FROM packages WHERE name = %s",(package,))
			package_id = cur.fetchone()[0]
		else:
			package_id = None
		
		if distro is not None:
			cur.execute("SELECT id FROM distros WHERE name = %s",(distro,))
			row = cur.fetchone()
			if row is None:
				print("Unknown distro: " + distro)
				raise UnknownDistroError(distro)
			distro_id = row[0]
		else:
			distro_id = None
		
		cached = False
		if package is None and distro is None:
			key = "/upstream/latest"
			query = "SELECT packages.name, ureleases.version, MIN(ureleases.released) FROM packages, ureleases WHERE packages.id = ureleases.package_id GROUP BY packages.name, ureleases.version HAVING MIN(ureleases.released) >= current_timestamp - interval '1 day' ORDER BY MIN(ureleases.released) DESC, packages.name ASC"
			query_args = []
		elif package is None and distro is not None:
			key = "/distro/%s/latest"%distro
			query = "SELECT packages.name, dreleases.version, dreleases.revision, MIN(dreleases.released) FROM packages, dreleases, repos, distros WHERE packages.id = dreleases.package_id AND repos.id = dreleases.repo_id AND distros.id = repos.distro_id AND distros.name = %s GROUP BY packages.name, dreleases.version, dreleases.revision HAVING MIN(dreleases.released) >= current_timestamp - interval '1 day' ORDER BY MIN(dreleases.released) DESC, packages.name ASC"
			query_args = (distro,)
		elif package is not None and distro is None:
			key = "/pkg/%s/latest"%package
			query = "SELECT packages.name, ureleases.version, MIN(ureleases.released) FROM packages, ureleases WHERE packages.id = ureleases.package_id AND packages.name = %s GROUP BY packages.name, ureleases.version HAVING MIN(ureleases.released) >= current_timestamp - interval '1 day' ORDER BY MIN(ureleases.released) DESC"
			query_args = (package,)
		else:
			key = "/distro/%s/pkg/%s/latest"%(distro,package)
			query = "SELECT packages.name, dreleases.version, dreleases.revision, MIN(dreleases.released) FROM packages, dreleases, repos, distros WHERE packages.id = dreleases.package_id AND repos.id = dreleases.repo_id AND distros.id = repos.distro_id AND distros.name = %s AND packages.name = %s GROUP BY packages.name, dreleases.version, dreleases.revision HAVING MIN(dreleases.released) >= current_timestamp - interval '1 day' ORDER BY MIN(dreleases.released) DESC"
			query_args = (distro,package)
		
		now = datetime.datetime.now()
		day = datetime.timedelta(1)
		
		status = c.key_status(key)
		if status is not None:
			self.releases = c.get(key)
			if status == Cache.STALE:
				t = threading.Thread(target=self.update, args=(key, query, query_args, package_id, distro_id))
				t.start()
		else:
			self.update(key, query, query_args, package_id, distro_id)
			
		self.today = len(self.releases)
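
Examples 5 through 7 all follow the same serve-stale-while-revalidating pattern: a missing key triggers a blocking update, while a stale key is served immediately and refreshed in a background thread. A generic sketch of that pattern, assuming a Cache exposing key_status/get and a STALE marker as in the examples above:

    import threading

    def get_or_refresh(cache, key, update):
        # Serve a cached value, refreshing it in the background when stale.
        # Assumes cache.key_status(key) returns None for a missing key or
        # Cache.STALE, and that update(cache, key) recomputes and stores it.
        status = cache.key_status(key)
        if status is None:
            update(cache, key)  # cold cache: block until the value exists
        elif status == Cache.STALE:
            # stale: serve the old value now, refresh concurrently
            threading.Thread(target=update, args=(cache, key)).start()
        return cache.get(key)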
Example 8
    async def get_from_backend(address_components):
        cache = Cache(OSMLookup.name)
        result = cache.get(address_components)
        is_cached = bool(result)
        if is_cached:
            request_count_cached.labels(OSMLookup.name).inc()
        else:
            result = osm_geocode_address(address_components)
            cache.save(address_components,
                       result)  # TODO do this in the background
        request_count.labels(OSMLookup.name).inc()

        point = Point((Decimal(result["lon"]), Decimal(result["lat"])))
        feature = Feature(
            geometry=point,
            properties={
                "service": OSMLookup.name,
                "timestamp":
                datetime.datetime.utcnow().isoformat() + "Z",  # UTC, explicit Z
                "cached": is_cached,  # should this be a timestamp?
            },
        )

        return feature
Example 9
    def test_save_can_overwrite_data(self):
        cache = Cache("test", data_dir=self.tempdir)
        cache.save(self.address, {"foo": "bar"})
        cache.save(self.address, {"foo": "baz"})

        assert cache.get(self.address)["foo"] == "baz"
Example 10
    def test_get_returns_nothing_for_cold_cache(self):
        cache = Cache("test", data_dir=self.tempdir)
        assert not cache.get(self.address)
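
Examples 3, 8, 9 and 10 all rely on the same small Cache contract. A minimal in-memory stand-in that would satisfy the two tests above is sketched below; it assumes the address keys are dicts, and the real implementation presumably persists entries under data_dir:

    class Cache:
        # In-memory sketch of the get/save contract the tests exercise;
        # name and data_dir are accepted, but persistence is not implemented.
        def __init__(self, name, data_dir=None):
            self.name = name
            self.data_dir = data_dir
            self._store = {}

        def _key(self, address_components):
            # dicts are unhashable; a sorted item tuple is one stable choice
            return tuple(sorted(address_components.items()))

        def save(self, address_components, result):
            self._store[self._key(address_components)] = result

        def get(self, address_components):
            return self._store.get(self._key(address_components))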
Example 11
class EventHandler(object):
    """Handle Hangups conversation events

    Args:
        bot: HangupsBot instance
    """
    def __init__(self, bot):
        self.bot = GenericEvent.bot = bot
        self.bot_command = ['/bot']

        self.pluggables = {
            "allmessages": [],
            "call": [],
            "membership": [],
            "message": [],
            "rename": [],
            "history": [],
            "sending": [],
            "typing": [],
            "watermark": [],
        }

        # timeout for messages to be received for reprocessing: 6 hours
        receive_timeout = 60 * 60 * 6

        self._reprocessors = Cache(receive_timeout, increase_on_access=False)

        self._contexts = Cache(receive_timeout, increase_on_access=False)

        self._image_ids = Cache(receive_timeout, increase_on_access=False)

        self._executables = Cache(receive_timeout, increase_on_access=False)

    async def setup(self, _conv_list):
        """async init part of the handler

        Args:
            _conv_list: hangups.conversation.ConversationList instance
        """
        await plugins.tracking.start({
            "module": "handlers",
            "module.path": "handlers"
        })

        plugins.register_shared("reprocessor.attach_reprocessor",
                                self.attach_reprocessor)

        plugins.register_shared("chatbridge.behaviours", {})

        self._reprocessors.start()
        self._contexts.start()
        self._image_ids.start()
        self._executables.start()

        plugins.tracking.end()

        _conv_list.on_event.add_observer(self._handle_event)
        _conv_list.on_typing.add_observer(self._handle_status_change)
        _conv_list.on_watermark_notification.add_observer(
            self._handle_status_change)

    def register_handler(self,
                         function,
                         pluggable="message",
                         priority=50,
                         **kwargs):
        """register an event handler

        Args:
            function: callable, the handling function/coro
            pluggable: string, a pluggable of .pluggables
            priority: int, lower priorities receive the event earlier
            kwargs: dict, legacy to catch the positional argument 'type'

        Raises:
            KeyError: unknown pluggable specified
        """
        if 'type' in kwargs:
            pluggable = kwargs['type']
            logger.warning(
                'The positional argument "type" is deprecated and will be '
                'removed soon.',
                stack_info=True)

        # a handler may not use all args or kwargs, inspect now and filter later
        expected = inspect.signature(function).parameters
        names = list(expected)

        current_plugin = plugins.tracking.current
        self.pluggables[pluggable].append(
            (function, priority, current_plugin["metadata"], expected, names))
        # sort by priority
        self.pluggables[pluggable].sort(key=lambda tup: tup[1])
        plugins.tracking.register_handler(function, pluggable, priority)

    def register_context(self, context):
        """register a message context that can be later attached again

        Args:
            context: dict, no keys are required

        Returns:
            string, a unique identifier for the context
        """
        context_id = None
        while context_id is None or context_id in self._contexts:
            context_id = str(uuid.uuid4())
        self._contexts[context_id] = context
        return context_id

    def register_reprocessor(self, func):
        """register a function that can be called later

        Args:
            func: a callable that takes three args: bot, event, command

        Returns:
            string, a unique identifier for the callable
        """
        reprocessor_id = None
        while reprocessor_id is None or reprocessor_id in self._reprocessors:
            reprocessor_id = str(uuid.uuid4())
        self._reprocessors[reprocessor_id] = func
        return reprocessor_id

    def attach_reprocessor(self, func, return_as_dict=None):
        """connect a func to an identifier to reprocess the event on receive

        reprocessor: maps a func to a hidden annotation on a message.
        When the message is sent and subsequently received by the bot, it is
        passed to the func, which can modify the event object by reference
        before it runs through the regular event processing

        Args:
            func: callable that takes three arguments: bot, event, command
            return_as_dict: legacy code
        """
        #pylint:disable=unused-argument
        reprocessor_id = self.register_reprocessor(func)
        return {"id": reprocessor_id, "callable": func}

    # handler core

    async def image_uri_from(self, image_id, callback, *args, **kwargs):
        """retrieve a public url for an image upload

        Args:
            image_id: int, upload id of a previous upload
            callback: coro, awaitable callable
            args: tuple, positional arguments for the callback
            kwargs: dict, keyword arguments for the callback

        Returns:
            boolean, False if no url became available within 60 sec, otherwise True
        """
        # TODO(das7pad) refactor plugins to use bot._client.image_upload_raw

        # there was no direct way to resolve an image_id to the public url
        # without posting it first via the api.
        # plugins and functions can establish a short-lived task to wait for
        # the image id to be posted and retrieve the url in an asynchronous way

        ticks = 0
        while ticks < 60:
            if image_id in self._image_ids:
                await callback(self._image_ids[image_id], *args, **kwargs)
                return True
            await asyncio.sleep(1)
            ticks += 1
        return False

    async def run_reprocessor(self, reprocessor_id, event, *args, **kwargs):
        """reprocess the event with the callable that was attached on sending

        Args:
            reprocessor_id: string, a found reprocessor id
            event: hangupsbot event instance
        """
        reprocessor = self._reprocessors.get(reprocessor_id, pop=True)
        if reprocessor is None:
            return

        logger.info("reprocessor uuid found: %s", reprocessor_id)
        result = reprocessor(self.bot, event, reprocessor_id, *args, **kwargs)
        if asyncio.iscoroutinefunction(reprocessor):
            await result

    async def _handle_chat_message(self, event):
        """Handle an incoming conversation event

        - auto-optin opt-outed users if the event is in a 1on1
        - run connected event-reprocessor
        - forward the event to handlers:
            - allmessages, all events
            - message, if user is not the bot user
        - handle the text as command, if the user is not the bot user

        Args:
            event: event.ConversationEvent instance

        Raises:
            exceptions.SuppressEventHandling: do not handle the event at all
        """
        if not event.text:
            return
        if (not event.user.is_self
                and self.bot.conversations[event.conv_id]["type"]
                == "ONE_TO_ONE" and self.bot.user_memory_get(
                    event.user_id.chat_id, "optout") is True):
            logger.info("auto opt-in for %s", event.user.id_.chat_id)
            await command.run(self.bot, event, *["optout"])
            return

        event.syncroom_no_repeat = False
        event.passthru = {}
        event.context = {}

        # EventAnnotation - allows metadata to survive a trip to Google
        # pylint: disable=protected-access
        for annotation in event.conv_event._event.chat_message.annotation:
            if (annotation.type == 1025
                    and annotation.value in self._reprocessors):
                await self.run_reprocessor(annotation.value, event)
            elif annotation.type == 1027 and annotation.value in self._contexts:
                event.context = self._contexts[annotation.value]
                if "passthru" in event.context:
                    event.passthru = event.context["passthru"]

        # map image ids to their public uris in absence of any fixed server api
        if (event.passthru and "original_request" in event.passthru
                and "image_id" in event.passthru["original_request"]
                and event.passthru["original_request"]["image_id"]
                and len(event.conv_event.attachments) == 1):

            _image_id = event.passthru["original_request"]["image_id"]
            _image_uri = event.conv_event.attachments[0]

            if _image_id not in self._image_ids:
                self._image_ids[_image_id] = _image_uri
                logger.info("associating image_id=%s with %s", _image_id,
                            _image_uri)

        # first occurrence of an executable id needs to be handled as an event
        if (event.passthru and event.passthru.get("executable")
                and event.passthru["executable"] not in self._executables):
            original_message = event.passthru["original_request"]["message"]
            linked_hangups_user = event.passthru["original_request"]["user"]
            logger.info("current event is executable: %s", original_message)
            self._executables[event.passthru["executable"]] = time.time()
            event.from_bot = False
            event.text = original_message
            event.user = linked_hangups_user
            event.user_id = linked_hangups_user.id_

        await self.run_pluggable_omnibus("allmessages", self.bot, event,
                                         command)
        if not event.from_bot:
            await self.run_pluggable_omnibus("message", self.bot, event,
                                             command)
            await self._handle_command(event)

    async def _handle_command(self, event):
        """Handle command messages

        Args:
            event: event.ConversationEvent instance
        """
        if not event.text:
            return

        bot = self.bot

        # is commands_enabled?
        config_commands_enabled = bot.get_config_suboption(
            event.conv_id, 'commands_enabled')
        tagged_ignore = "ignore" in bot.tags.useractive(
            event.user_id.chat_id, event.conv_id)

        if not config_commands_enabled or tagged_ignore:
            admins_list = bot.get_config_suboption(event.conv_id, 'admins')
            # admins always have commands enabled
            if event.user_id.chat_id not in admins_list:
                return

        # check that a bot alias is used e.g. /bot
        if event.text.split()[0].lower() not in self.bot_command:
            if (bot.conversations[event.conv_id]["type"] == "ONE_TO_ONE"
                    and bot.config.get_option('auto_alias_one_to_one')):
                # Insert default alias if not already present
                event.text = u" ".join((self.bot_command[0], event.text))
            else:
                return

        # Parse message, convert non-breaking space in Latin1 (ISO 8859-1)
        event.text = event.text.replace(u'\xa0', u' ')
        try:
            line_args = shlex.split(event.text, posix=False)
        except ValueError:
            logger.exception('shlex.split failed parsing "%s"', event.text)
            line_args = event.text.split()

        commands = command.get_available_commands(bot, event.user_id.chat_id,
                                                  event.conv_id)

        supplied_command = line_args[1].lower()
        if (supplied_command in commands["user"]
                or supplied_command in commands["admin"]):
            pass
        elif supplied_command in command.commands:
            await command.blocked_command(bot, event, *line_args[1:])
            return
        else:
            await command.unknown_command(bot, event, *line_args[1:])
            return

        # Run command
        results = await command.run(bot, event, *line_args[1:])

        if hasattr(event, "acknowledge"):
            for id_ in event.acknowledge:
                await self.run_reprocessor(id_, event, results)

    async def run_pluggable_omnibus(self, name, *args, **kwargs):
        """forward args to a group of handlers which were registered for the name

        Args:
            name: string, a key in .pluggables
            args: tuple, positional arguments for each handler
            kwargs: dict, keyword arguments for each handler,
                may include '_run_concurrent_' to run them parallel

        Raises:
            KeyError: unknown pluggable specified
            HangupsBotExceptions.SuppressEventHandling: do not handle further
        """
        async def _run_single_handler(function, meta, expected, names):
            """execute a single handler function

            Args:
                function: callable
                meta: dict
                expected: ordered mapping of inspect.Parameter instances
                names: list of strings, keys in expected

            Raises:
                HangupsBotExceptions.SuppressAllHandlers:
                    skip handlers of the current type
                HangupsBotExceptions.SuppressEventHandling:
                    skip all handlers and do not handle this event further
            """
            message = [
                "%s: %s.%s" % (name, meta['module.path'], function.__name__)
            ]
            try:
                # a function may not use all args or kwargs, filter here
                positional = (args[num] for num in range(len(args)) if (
                    len(names) > num and (
                        expected[names[num]].default == inspect.Parameter.empty
                        or names[num] not in kwargs)))
                keyword = {
                    key: value
                    for key, value in kwargs.items() if key in names
                }

                logger.debug(message[0])
                result = function(*positional, **keyword)
                if asyncio.iscoroutinefunction(function):
                    await result

            except HangupsBotExceptions.SuppressHandler:
                # skip this handler, continue with next
                message.append("SuppressHandler")
                logger.debug(" : ".join(message))
            except HangupsBotExceptions.SuppressAllHandlers:
                # skip all other pluggables, but let the event continue
                message.append("SuppressAllHandlers")
                logger.debug(" : ".join(message))
                raise
            except HangupsBotExceptions.SuppressEventHandling:
                # the handler requested to skip all pluggables
                raise
            except:  # capture all Exceptions   # pylint: disable=bare-except
                # exception is not related to the handling of this
                # pluggable, log and continue with the next handler
                message.append("args=" + str([str(arg) for arg in args]))
                message.append("kwargs=" + str(kwargs))
                logger.exception(" : ".join(message))

        try:
            if kwargs.pop('_run_concurrent_', False):
                await asyncio.gather(*[
                    _run_single_handler(function, meta, expected, names)
                    for function, dummy, meta, expected, names in
                    self.pluggables[name].copy()
                ])
                return

            for (function, dummy, meta, expected,
                 names) in self.pluggables[name].copy():
                await _run_single_handler(function, meta, expected, names)

        except HangupsBotExceptions.SuppressAllHandlers:
            pass

        except HangupsBotExceptions.SuppressEventHandling:
            # the handler requested to not handle the event at all; skip all
            # handlers and do not continue with event handling in the parent
            raise

    async def _handle_event(self, conv_event):
        """Handle conversation events

        Args:
            conv_event: hangups.conversation_event.ConversationEvent instance
        """
        event = ConversationEvent(conv_event)

        if isinstance(conv_event, hangups.ChatMessageEvent):
            pluggable = None

        elif isinstance(conv_event, hangups.MembershipChangeEvent):
            pluggable = "membership"

        elif isinstance(conv_event, hangups.RenameEvent):
            pluggable = "rename"

        elif isinstance(conv_event, hangups.OTREvent):
            pluggable = "history"

        elif isinstance(conv_event, hangups.HangoutEvent):
            pluggable = "call"

        else:
            # Unsupported Events:
            # * GroupLinkSharingModificationEvent
            # https://github.com/tdryer/hangups/blob/master/hangups/conversation_event.py
            logger.warning("unrecognised event type: %s", type(conv_event))
            return

        # rebuild permamem for a conv including conv-name, participants, otr
        await self.bot.conversations.update(event.conv, source="event")

        if pluggable is None:
            asyncio.ensure_future(self._handle_chat_message(event))
            return

        asyncio.ensure_future(
            self.run_pluggable_omnibus(pluggable, self.bot, event, command))

    async def _handle_status_change(self, state_update):
        """run notification handler for a given state_update

        Args:
            state_update: hangups.parsers.TypingStatusMessage or
             hangups.parsers.WatermarkNotification instance
        """
        if isinstance(state_update, hangups.parsers.TypingStatusMessage):
            pluggable = "typing"
            event = TypingEvent(state_update)

        else:
            pluggable = "watermark"
            event = WatermarkEvent(state_update)

        asyncio.ensure_future(
            self.run_pluggable_omnibus(pluggable, self.bot, event, command))
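
A short sketch of how a plugin might register with Example 11's EventHandler; the handler body and the handler instance name are hypothetical. Because run_pluggable_omnibus filters arguments via inspect.signature, the coroutine only needs to declare the arguments it actually uses:

    # Hypothetical plugin handler: declares only 'bot' and 'event', so the
    # third positional argument ('command') is filtered out before the call.
    async def on_membership_change(bot, event):
        logger.info("membership changed in %s", event.conv_id)

    handler.register_handler(on_membership_change,
                             pluggable="membership",
                             priority=40)  # below 50: runs before defaults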
Example 12
class DataLoader:
    """
    Class responsible for combining raw dataset data into the form consumed by the networks.
    It is also responsible for sending them to the GPU.
    Make sure the data functions are thread safe!

    Constructor arguments:
        Adapter -- the dataset adapter serving the raw data
        minibatch_size -- the number of elements per minibatch
        data_function -- data_function for network input/output. See attribute docstring.
        [split_limits] -- optional limit for the elements in the train, test and val sets.
        [noshuffle=False] -- if set, epoch elements are not shuffled
        [caching=True] -- whether or not to cache data function calls
        [gpu_caching=True] -- whether or not to cache data function calls in gpu memory
    """
    def __init__(self, adapter, **kwargs):
        self.adapter = adapter
        "The dataset adapter serving the raw data."

        self.queue = []
        "A queue for the elements still to be served this epoch."

        self.split_limits = {'train': None, 'test': None, 'val': None}
        "Optional limits for the number of elements in train, test, and val sets."

        self.noshuffle = False
        "If set, epoch elements are not shuffled."

        self.current_minibatch = None
        "The current minibatch index in the epoch."

        self.current_phase = None
        "The current phase (train, test or val)."

        self.minibatch_size = None
        "The amount of elements per minibatch."

        self.cache = Cache(enabled=kwargs.pop('caching', True),
                           gpu=kwargs.pop('gpu_caching', True))
        "The cache used by the data function calls. By default, caches everything in GPU memory."

        self.data_function = None
        """
        Function that serves the input and target data for a given minibatch element from a given adapter.
        The minibatch dimension should already be added - they are concatenated along the first dimension.
        
        This function should handle any desired caching itself, using the passed cache.
        Input: adapter, element [, cache]
        Output: (input, target) tuple
        Both input and target should be a tuple
        """

        self._logger = None
        "Logger to handle output."

        self.center_crop_size = None
        "Used by the patch-based data servers to crop the center view."

        self.refinement_experiment = None

        self.nr_neighbours = 4

        self.restricted_nr_views = 1
        "Used by some data loader functions"

        self.__dict__.update(kwargs)

        if self.refinement_experiment is not None:
            self.refinement_experiment = experiment_handler.ExperimentHandler.load_experiment_from_file(
                self.refinement_experiment)

    def get_nr_neighbours(self):
        "Silly function to stay backwards compatible with previous versions that didn't have this field"
        try:
            return self.nr_neighbours
        except Exception:
            return 4

    def set_logger(self, logger):
        self._logger = logger

    def get_data_name(self):
        "Returns an informative name for the underlying data."
        return self.adapter.get_dataset_name()

    def initialize_phase(self, phase):
        """Initialize the epoch element list, shuffle it, and reset state."""
        self.queue = self.adapter.split[phase].copy()
        if self.split_limits[phase] is not None:
            self.queue = self.queue[:self.split_limits[phase]]
        if not self.noshuffle and phase == "train":
            random.shuffle(self.queue)
        self.current_minibatch = -1
        self.current_phase = phase

    def get_minibatch_count(self):
        """Returns the total number of minibatches this epoch."""
        return math.ceil(len(self.queue) / self.minibatch_size)

    def get_epoch_size(self):
        """Returns the total number of elements this epoch."""
        return len(self.queue)

    def get_minibatch_size(self):
        """Returns the number of elements this minibatch."""
        return min(
            self.minibatch_size,
            len(self.queue) - self.minibatch_size * self.current_minibatch)

    def __next__(self):
        """Returns the next minibatch in the current epoch. Generator."""
        self.current_minibatch += 1
        if self.current_minibatch >= self.get_minibatch_count():
            raise StopIteration()
        # at this point, we should make sure they are on the GPU
        # if they were before, this is a cheap operation
        (data_in, target) = self.get_minibatch_data(self.data_function)
        data_in = [x.to(GPU) for x in data_in]
        target = [x.to(GPU) for x in target]
        return data_in, target

    def __iter__(self):
        return self

    def get_minibatch_elements(self):
        """Returns the elements in the current minibatch."""
        elements = []
        for i in range(self.get_minibatch_size()):
            elements.append(
                self.queue[i + self.minibatch_size * self.current_minibatch])
        return elements

    def get_minibatch_data(self, data_function):
        """Load the requested data for all elements of the current minibatch."""
        minibatch_elements = self.get_minibatch_elements()
        minibatch_datas_in = []
        minibatch_datas_gt = []
        for element in minibatch_elements:
            data_args = (self, element)
            element_data = data_function(*data_args)
            minibatch_datas_in.append(element_data[0])
            minibatch_datas_gt.append(element_data[1])
            nr_in = len(element_data[0])
            nr_gt = len(element_data[1])

        minibatch_data_in = []
        minibatch_data_gt = []
        for idx_in in range(nr_in):
            subdata = [x[idx_in] for x in minibatch_datas_in]
            minibatch_data_in.append(torch.cat(subdata, dim=0))
        for idx_gt in range(nr_gt):
            subdata = [x[idx_gt] for x in minibatch_datas_gt]
            minibatch_data_gt.append(torch.cat(subdata, dim=0))

        return (minibatch_data_in, minibatch_data_gt)

    def data_function_cameras_worldtf(self, element):
        """
        Example data function for getting an element's cameras as input and world transform as output.
        Low-dimensional data for framework sanity checks.
        """
        return ((self.cache.get(self.adapter.get_element_cameras,
                                (element, )).unsqueeze(0), ),
                (self.cache.get(self.adapter.get_element_worldtf,
                                (element, )).unsqueeze(0), ))

    def data_function_neighbours_and_depth(self, element):
        """
        Example data function serving the central view and its neighbours (+ cameras),
        and its depth map as the target
        """
        # decide on a central view
        cameras = self.cache.get(self.adapter.get_element_cameras, (element, ))
        neighbours = None
        while neighbours is None:
            center_view = random.randint(0, self.adapter.nr_views - 1)
            neighbours = self.adapter.get_view_neighbours(
                cameras, center_view, self.get_nr_neighbours())

        # get the central depth map
        center_image = self.cache.get(self.adapter.get_single_image,
                                      (element, center_view))
        center_depth_map = self.cache.get(self.adapter.get_single_depth_map,
                                          (element, center_view, True))

        neighbour_images = [center_image]
        neighbour_cameras = [cameras[center_view]]
        for neighbour in neighbours:
            neighbour_images.append(
                self.cache.get(self.adapter.get_single_image,
                               (element, neighbour)))
            neighbour_cameras.append(cameras[neighbour])
        neighbour_images = [
            torch.unsqueeze(x, dim=0) for x in neighbour_images
        ]
        neighbour_cameras = [
            torch.unsqueeze(x, dim=0) for x in neighbour_cameras
        ]

        views = torch.cat(neighbour_images, dim=0)
        cameras = torch.cat(neighbour_cameras, dim=0)

        return ((views.unsqueeze(0), cameras.unsqueeze(0)),
                (center_depth_map.unsqueeze(0), ))

    def data_function_color_depth_and_gt(self, element, center_view=None):
        """
        Example data function serving the central view and its estimated depth,
        and the gt depth as the target.
        """
        # decide on a central view
        if center_view is None:
            center_view = random.randint(0, self.adapter.nr_views - 1)

        # get the central image, estimate and GT depth map
        center_image = self.cache.get(self.adapter.get_single_image,
                                      (element, center_view))
        center_estimate = self.cache.get(self.adapter.get_single_depth_map,
                                         (element, center_view, False))
        center_depth_map = self.cache.get(self.adapter.get_single_depth_map,
                                          (element, center_view, True))

        return ((center_image[None], center_estimate[None]),
                (center_depth_map[None], ))

    def data_function_all_color_depth_and_gt(self, element):
        """
        Example data function serving the central view and its estimated depth, for all views,
        and the gt depth as the target.
        """
        # get the central image, estimate and GT depth map
        center_images = self.cache.get(self.adapter.get_element_images,
                                       (element, ))
        center_estimates = self.cache.get(self.adapter.get_element_depth_maps,
                                          (element, False))
        center_depth_maps = self.cache.get(self.adapter.get_element_depth_maps,
                                           (element, True))

        if self.restricted_nr_views != 0:
            center_images = center_images[:self.restricted_nr_views]
            center_estimates = center_estimates[:self.restricted_nr_views]
            center_depth_maps = center_depth_maps[:self.restricted_nr_views]

        return ((center_images, center_estimates), (center_depth_maps, ))

    def data_function_patched_color_depth_and_gt(self,
                                                 element,
                                                 center_view=None):
        """
        Example data function serving the central view and its estimated depth,
        and the gt depth as the target.
        """
        # decide on a central view (honour the caller's choice if given)
        if center_view is None:
            center_view = random.randint(0, self.adapter.nr_views - 1)

        # get the central image, estimate and GT depth map
        center_image = self.cache.get(self.adapter.get_single_image,
                                      (element, center_view))
        center_estimate = self.cache.get(self.adapter.get_single_depth_map,
                                         (element, center_view, False))
        center_depth_map = self.cache.get(self.adapter.get_single_depth_map,
                                          (element, center_view, True))

        if self.center_crop_size is not None:
            Cc = self.center_crop_size
            x0 = np.random.randint(0, center_image.shape[2] - Cc)
            y0 = np.random.randint(0, center_image.shape[1] - Cc)
            center_image = center_image[:, y0:y0 + Cc, x0:x0 + Cc]
            center_estimate = center_estimate[:, y0:y0 + Cc, x0:x0 + Cc]
            center_depth_map = center_depth_map[:, y0:y0 + Cc, x0:x0 + Cc]

        return ((center_image[None], center_estimate[None]),
                (center_depth_map[None], ))

    def data_function_color_depth_and_color_gt(self, element):
        """
        Example data function serving the central view and its estimated depth,
        and the gt depth as the target.
        """
        # decide on a central view
        center_view = random.randint(0, self.adapter.nr_views - 1)

        # get the central image, estimate and GT depth map
        center_image = self.cache.get(self.adapter.get_single_image,
                                      (element, center_view))
        center_estimate = self.cache.get(self.adapter.get_single_depth_map,
                                         (element, center_view, False))
        center_depth_map = self.cache.get(self.adapter.get_single_depth_map,
                                          (element, center_view, True))

        return ((center_image[None], center_estimate[None]), (
            center_image[None],
            center_depth_map[None],
        ))

    def data_function_neighbours_and_depth_and_normals(self, element):
        """
        Example data function serving the central view and its neighbours (+ cameras),
        and its depth map as the target, as well as its normal map
        """
        # decide on a central view
        cameras = self.cache.get(self.adapter.get_element_cameras, (element, ))
        neighbours = None
        while neighbours is None:
            center_view = random.randint(0, self.adapter.nr_views - 1)
            neighbours = self.adapter.get_view_neighbours(
                cameras, center_view, self.get_nr_neighbours())

        # get the central depth map
        center_image = self.cache.get(self.adapter.get_single_image,
                                      (element, center_view))
        center_depth_map = self.cache.get(self.adapter.get_single_depth_map,
                                          (element, center_view, True))
        center_normal_map = self.cache.get(self.adapter.get_single_normal_map,
                                           (element, center_view))

        neighbour_images = [center_image]
        neighbour_cameras = [cameras[center_view]]
        for neighbour in neighbours:
            neighbour_images.append(
                self.cache.get(self.adapter.get_single_image,
                               (element, neighbour)))
            neighbour_cameras.append(cameras[neighbour])
        neighbour_images = [
            torch.unsqueeze(x, dim=0) for x in neighbour_images
        ]
        neighbour_cameras = [
            torch.unsqueeze(x, dim=0) for x in neighbour_cameras
        ]

        views = torch.cat(neighbour_images, dim=0)
        cameras = torch.cat(neighbour_cameras, dim=0)

        return ((views.unsqueeze(0), cameras.unsqueeze(0)), (
            center_depth_map.unsqueeze(0),
            center_normal_map.unsqueeze(0),
        ))

    def data_function_depth_volumes_and_gt(self, element):
        """
        Example data function serving all of the depth map estimates (+ cameras),
        and a GT depth map as the target. Order of the viewset is random (camera matrices adjusted)
        """

        all_cameras = self.cache.get(self.adapter.get_element_cameras,
                                     (element, ))
        viewset = list(self.adapter.valid_centerviews)
        random.shuffle(viewset)
        estimates = []
        cameras = []

        for view in viewset:
            estimate = self.cache.get(self.adapter.get_single_depth_map,
                                      (element, view, False))
            estimates.append(torch.unsqueeze(estimate, dim=0))
            cameras.append(torch.unsqueeze(all_cameras[view], dim=0))

        gt_depth = self.cache.get(self.adapter.get_single_depth_map,
                                  (element, viewset[0], True))

        views = torch.cat(estimates, dim=0).unsqueeze(0)
        cameras = torch.cat(cameras, dim=0).unsqueeze(0)

        return ((views, cameras), (gt_depth.unsqueeze(0), ))

    def data_function_refinement(self, element, center_view=None):
        """
        Example data function serving central color, neighbouring depth map estimates (+ cameras),
        and a GT depth map as the target. Order of the viewset is random (camera matrices adjusted).
        """

        all_cameras = self.cache.get(self.adapter.get_element_cameras,
                                     (element, ))
        if center_view is None:
            viewset = list(self.adapter.valid_centerviews)
            random.shuffle(viewset)
            center_view = viewset[0]
        neighbours = self.adapter.get_view_neighbours(all_cameras, center_view,
                                                      self.get_nr_neighbours())

        center_color = self.cache.get(self.adapter.get_single_image,
                                      (element, center_view))[None]
        estimates = [
            self.cache.get(self.adapter.get_single_depth_map,
                           (element, center_view, False))[None]
        ]
        cameras = [all_cameras[center_view][None]]

        for view in neighbours:
            estimate = self.cache.get(self.adapter.get_single_depth_map,
                                      (element, view, False))
            estimates.append(estimate[None])
            cameras.append(all_cameras[view][None])

        gt_depth = self.cache.get(self.adapter.get_single_depth_map,
                                  (element, center_view, True))[None]

        center_color = center_color.unsqueeze(0)
        depths = torch.cat(estimates, dim=0).unsqueeze(0)
        cameras = torch.cat(cameras, dim=0).unsqueeze(0)

        return ((center_color, depths, cameras), (gt_depth, ))

    def data_function_refinement_full(self, element, center_view=None):
        """
        Example data function serving color, neighbouring depth map estimates (+ cameras),
        and a GT depth map as the target. Order of the viewset is random (camera matrices adjusted).
        """

        all_cameras = self.cache.get(self.adapter.get_element_cameras,
                                     (element, ))
        if center_view is None:
            viewset = list(self.adapter.valid_centerviews)
            random.shuffle(viewset)
            center_view = viewset[0]
        neighbours = self.adapter.get_view_neighbours(all_cameras, center_view,
                                                      self.get_nr_neighbours())

        colors = [
            self.cache.get(self.adapter.get_single_image,
                           (element, center_view))[None]
        ]
        estimates = [
            self.cache.get(self.adapter.get_single_depth_map,
                           (element, center_view, False))[None]
        ]
        cameras = [all_cameras[center_view][None]]

        gt_depth = self.cache.get(self.adapter.get_single_depth_map,
                                  (element, center_view, True))[None]

        for view in neighbours:
            color = self.cache.get(self.adapter.get_single_image,
                                   (element, view))
            colors.append(color[None])
            estimate = self.cache.get(self.adapter.get_single_depth_map,
                                      (element, view, False))
            estimates.append(estimate[None])
            cameras.append(all_cameras[view][None])

        colors = torch.cat(colors, dim=0).unsqueeze(0)
        depths = torch.cat(estimates, dim=0).unsqueeze(0)
        cameras = torch.cat(cameras, dim=0).unsqueeze(0)

        return ((colors, depths, cameras), (gt_depth, ))

    def data_function_refinement_patched(self, element, center_view=None):
        """
        Example data function serving color, neighbouring depth map estimates (+ cameras),
        and a GT depth map as the target. Order of the viewset is random (camera matrices adjusted).
        """

        all_cameras = self.cache.get(self.adapter.get_element_cameras,
                                     (element, ))
        if center_view is None:
            viewset = list(self.adapter.valid_centerviews)
            random.shuffle(viewset)
            center_view = viewset[0]
        neighbours = self.adapter.get_view_neighbours(all_cameras, center_view,
                                                      self.get_nr_neighbours())

        color_center = self.cache.get(self.adapter.get_single_image,
                                      (element, center_view))[None]
        estimate_center = self.cache.get(self.adapter.get_single_depth_map,
                                         (element, center_view, False))[None]
        camera_center = all_cameras[center_view][None]

        colors = []
        estimates = []
        cameras = []

        gt_center = self.cache.get(self.adapter.get_single_depth_map,
                                   (element, center_view, True))[None]

        if self.center_crop_size is not None:
            Cc = self.center_crop_size
            x0 = np.random.randint(0, color_center.shape[3] - Cc)
            y0 = np.random.randint(0, color_center.shape[2] - Cc)
            color_center = color_center[:, :, y0:y0 + Cc, x0:x0 + Cc]
            estimate_center = estimate_center[:, :, y0:y0 + Cc, x0:x0 + Cc]
            gt_center = gt_center[:, :, y0:y0 + Cc, x0:x0 + Cc]

            # also move the optical center for the camera
            opticalcenter_shifter = torch.Tensor(
                np.array([[1, 0, -x0], [0, 1, -y0], [0, 0, 1]]))[None]
            opticalcenter_shifter = opticalcenter_shifter.to(
                camera_center.device)
            camera_center = torch.matmul(opticalcenter_shifter, camera_center)

        for view in neighbours:
            color = self.cache.get(self.adapter.get_single_image,
                                   (element, view))
            colors.append(color[None])
            estimate = self.cache.get(self.adapter.get_single_depth_map,
                                      (element, view, False))
            estimates.append(estimate[None])
            cameras.append(all_cameras[view][None])

        colors = torch.cat(colors, dim=0).unsqueeze(0)
        estimates = torch.cat(estimates, dim=0).unsqueeze(0)
        cameras = torch.cat(cameras, dim=0).unsqueeze(0)

        return ((color_center, colors, estimate_center, estimates,
                 camera_center, cameras), (gt_center, ))

    def data_function_successive_refinement_full(self,
                                                 element,
                                                 center_view=None):
        """
        Example data function serving color, neighbouring depth map estimates (+ cameras),
        and a GT depth map as the target. Order of the viewset is random (camera matrices adjusted).
        """

        all_cameras = self.cache.get(self.adapter.get_element_cameras,
                                     (element, ))
        if center_view is None:
            viewset = list(self.adapter.valid_centerviews)
            random.shuffle(viewset)
            center_view = viewset[0]
        neighbours = self.adapter.get_view_neighbours(all_cameras, center_view,
                                                      self.get_nr_neighbours())

        colors = [
            self.cache.get(self.adapter.get_single_image,
                           (element, center_view))[None]
        ]
        estimate, trust = self.cache.get(
            self.adapter.get_single_depth_map_and_trust,
            (element, center_view, False))
        estimates = [estimate[None]]
        trusts = [trust[None]]
        cameras = [all_cameras[center_view][None]]

        gt_depth = self.cache.get(self.adapter.get_single_depth_map,
                                  (element, center_view, True))[None]

        for view in neighbours:
            color = self.cache.get(self.adapter.get_single_image,
                                   (element, view))
            colors.append(color[None])
            estimate, trust = self.cache.get(
                self.adapter.get_single_depth_map_and_trust,
                (element, view, False))
            estimates.append(estimate[None])
            trusts.append(trust[None])
            cameras.append(all_cameras[view][None])

        colors = torch.cat(colors, dim=0).unsqueeze(0)
        depths = torch.cat(estimates, dim=0).unsqueeze(0)
        trusts = torch.cat(trusts, dim=0).unsqueeze(0)
        cameras = torch.cat(cameras, dim=0).unsqueeze(0)

        return ((colors, depths, trusts, cameras), (gt_depth, ))

    def data_function_successive_refinement_patched(self,
                                                    element,
                                                    center_view=None):
        """
        This performs pre-refinement of the depth maps
        """

        all_cameras = self.cache.get(self.adapter.get_element_cameras,
                                     (element, ))
        if center_view is None:
            viewset = list(self.adapter.valid_centerviews)
            random.shuffle(viewset)
            center_view = viewset[0]
        neighbours = self.adapter.get_view_neighbours(all_cameras, center_view,
                                                      self.get_nr_neighbours())

        color_center = self.cache.get(self.adapter.get_single_image,
                                      (element, center_view))[None]
        estimate_center, trust_center = self.cache.get(
            self.adapter.get_single_depth_map_and_trust,
            (element, center_view, False))
        estimate_center = estimate_center[None]
        trust_center = trust_center[None]
        camera_center = all_cameras[center_view][None]

        gt_center = self.cache.get(self.adapter.get_single_depth_map,
                                   (element, center_view, True))[None]

        if self.center_crop_size is not None:
            Cc = self.center_crop_size
            x0 = np.random.randint(0, color_center.shape[3] - Cc)
            y0 = np.random.randint(0, color_center.shape[2] - Cc)
            color_center = color_center[:, :, y0:y0 + Cc, x0:x0 + Cc]
            estimate_center = estimate_center[:, :, y0:y0 + Cc, x0:x0 + Cc]
            trust_center = trust_center[:, :, y0:y0 + Cc, x0:x0 + Cc]
            gt_center = gt_center[:, :, y0:y0 + Cc, x0:x0 + Cc]

            # also move the optical center for the camera
            opticalcenter_shifter = torch.Tensor(
                np.array([[1, 0, -x0], [0, 1, -y0], [0, 0, 1]]))[None]
            opticalcenter_shifter = opticalcenter_shifter.to(
                camera_center.device)
            camera_center = torch.matmul(opticalcenter_shifter, camera_center)

        colors = []
        estimates = []
        trusts = []
        cameras = []

        for view in neighbours:
            color = self.cache.get(self.adapter.get_single_image,
                                   (element, view))
            colors.append(color[None])
            estimate, trust = self.cache.get(
                self.adapter.get_single_depth_map_and_trust,
                (element, view, False))
            estimates.append(estimate[None])
            trusts.append(trust[None])
            cameras.append(all_cameras[view][None])

        colors = torch.cat(colors, dim=0).unsqueeze(0)
        depths = torch.cat(estimates, dim=0).unsqueeze(0)
        trusts = torch.cat(trusts, dim=0).unsqueeze(0)
        cameras = torch.cat(cameras, dim=0).unsqueeze(0)

        return ((color_center, colors, estimate_center, depths, trust_center,
                 trusts, camera_center, cameras), (gt_center, ))

    def data_function_in_memory_successive_refinement_patched(
            self, element, center_view=None):
        """
        This performs pre-refinement of the depth maps
        """

        pr_loader = self.refinement_experiment._data_loader
        pr_loader.center_crop_size = None
        pr_datafn = self.refinement_experiment.get(
            'data_loader_options')['data_function']
        pr_network = self.refinement_experiment.network

        all_cameras = self.cache.get(self.adapter.get_element_cameras,
                                     (element, ))
        if center_view is None:
            viewset = list(self.adapter.valid_centerviews)
            random.shuffle(viewset)
            center_view = viewset[0]
        neighbours = self.adapter.get_view_neighbours(all_cameras, center_view,
                                                      self.get_nr_neighbours())

        color_center = self.cache.get(self.adapter.get_single_image,
                                      (element, center_view))[None]
        ctr_input = [
            x.cuda()
            for x in pr_datafn(pr_loader, element, center_view=center_view)[0]
        ]
        with torch.no_grad():
            estimate_center, trust_center = pr_network(*ctr_input)
        camera_center = all_cameras[center_view][None]

        colors = []
        estimates = []
        trusts = []
        cameras = []

        gt_center = self.cache.get(self.adapter.get_single_depth_map,
                                   (element, center_view, True))[None]

        if self.center_crop_size is not None:
            Cc = self.center_crop_size
            x0 = np.random.randint(0, color_center.shape[3] - Cc)
            y0 = np.random.randint(0, color_center.shape[2] - Cc)
            color_center = color_center[:, :, y0:y0 + Cc, x0:x0 + Cc]
            estimate_center = estimate_center[:, :, y0:y0 + Cc, x0:x0 + Cc]
            trust_center = trust_center[:, :, y0:y0 + Cc, x0:x0 + Cc]
            gt_center = gt_center[:, :, y0:y0 + Cc, x0:x0 + Cc]

            # also move the optical center for the camera
            opticalcenter_shifter = torch.Tensor(
                np.array([[1, 0, -x0], [0, 1, -y0], [0, 0, 1]]))[None]
            opticalcenter_shifter = opticalcenter_shifter.to(
                camera_center.device)
            camera_center = torch.matmul(opticalcenter_shifter, camera_center)

        for view in neighbours:
            color = self.cache.get(self.adapter.get_single_image,
                                   (element, view))
            colors.append(color[None])
            nbr_input = [
                x.cuda()
                for x in pr_datafn(pr_loader, element, center_view=view)[0]
            ]
            with torch.no_grad():
                estimate, trust = pr_network(*nbr_input)
            estimates.append(estimate)
            trusts.append(trust)
            cameras.append(all_cameras[view][None])

        colors = torch.cat(colors, dim=0).unsqueeze(0)
        estimates = torch.cat(estimates, dim=0).unsqueeze(0)
        trusts = torch.cat(trusts, dim=0).unsqueeze(0)
        cameras = torch.cat(cameras, dim=0).unsqueeze(0)

        return ((color_center, colors, estimate_center, estimates,
                 trust_center, trusts, camera_center, cameras), (gt_center, ))
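
A minimal usage sketch for Example 12's DataLoader, assuming a dataset adapter with its splits populated; network and criterion are placeholders, not part of the original class. The epoch loop leans on initialize_phase resetting the queue and on __next__ raising StopIteration at the end of the epoch:

    loader = DataLoader(adapter,
                        minibatch_size=4,
                        data_function=DataLoader.data_function_color_depth_and_gt)
    loader.initialize_phase("train")    # copy, limit and shuffle the queue
    for data_in, target in loader:      # __iter__/__next__ drive the epoch
        prediction = network(*data_in)  # inputs were already moved to the GPU
        loss = criterion(prediction, *target)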
Example 13
class FileFetchAndCache:
    ''' Fetch files over HTTP and cache their headers and contents.
        Return files from the cache if they have not changed.
    '''
    # HTTP header name constants (and cache keys)
    __if_none_match: ClassVar[str] = 'If-None-Match'
    __if_modified_since: ClassVar[str] = 'If-Modified-Since'
    __etag: ClassVar[str] = 'ETag'
    __last_modified: ClassVar[str] = 'Last-Modified'
    __file_bytes: ClassVar[str] = 'file_bytes'  # cache key
    __file_hash: ClassVar[str] = 'file_hash'  # cache key

    def __init__(self, verbose: bool = False) -> None:
        self.verbose = verbose
        self.cache = Cache()

    def get(self, host: str = None, URL: str = None) -> tuple:
        ''' Fetch an URL and cache the contents.
            Arguments:
                host: hostname:port
                URL: URL to fetch starting with /
            Returns a tuple of:
                success: bool
                from_cache: bool
                file_bytes: bytes
        '''
        success: bool = False
        from_cache: bool = False
        file_bytes: bytes = None
        file_hash: str = None
        request_headers: dict = {}

        # First check our cache for the headers from the URL,
        # if we find them, add headers to our request
        etag = self.cache.get(URL, FileFetchAndCache.__etag)
        last_mod = self.cache.get(URL, FileFetchAndCache.__last_modified)
        if etag is not None and last_mod is not None:
            request_headers[FileFetchAndCache.__if_none_match] = etag
            request_headers[FileFetchAndCache.__if_modified_since] = last_mod

        conn = http.client.HTTPSConnection(host)
        if self.verbose:
            conn.set_debuglevel(1)
        conn.request('GET', URL, headers=request_headers)
        resp = conn.getresponse()

        # Get and cache the headers we care about from the response
        etag = resp.getheader(FileFetchAndCache.__etag)
        if etag is not None:
            self.cache.set(URL, FileFetchAndCache.__etag, etag)
        last_mod = resp.getheader(FileFetchAndCache.__last_modified)
        if last_mod is not None:
            self.cache.set(URL, FileFetchAndCache.__last_modified, last_mod)

        # If we fetched the file the first time, cache it
        if resp.status == 200:
            file_bytes = resp.read()
            self.cache.set(URL, FileFetchAndCache.__file_bytes, file_bytes)
            file_hash = Hash.md5(file_bytes)
            self.cache.set(URL, FileFetchAndCache.__file_hash, file_hash)
            success = True
        elif resp.status == 304:
            file_bytes = self.cache.get(URL, FileFetchAndCache.__file_bytes)
            file_hash = self.cache.get(URL, FileFetchAndCache.__file_hash)
            success = True
            from_cache = True
        conn.close()

        if self.verbose:
            print(resp.status, resp.reason)
            print(self.cache)

        return success, from_cache, file_bytes
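
A short usage sketch for Example 13, assuming an HTTPS endpoint that honours ETag/Last-Modified; the host and path are placeholders. The second call sends the cached validators back, and a 304 response serves the body from the cache:

    fetcher = FileFetchAndCache(verbose=False)
    ok, from_cache, body = fetcher.get(host="example.com", URL="/data.json")
    # first fetch: status 200, from_cache is False, body cached with its hash
    ok, from_cache, body = fetcher.get(host="example.com", URL="/data.json")
    # unchanged resource: status 304, from_cache is True, body from the cache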