Example #1
class Storage:

    def __init__(self, mode=StorageModes.MEMORY):
        self.mode = mode
        if self.mode == StorageModes.PERSISTENT:
            self.cache = SqliteDict('../my_db.sqlite', autocommit=True)
        elif self.mode == StorageModes.MEMORY:
            self.cache = dict()

    def set(self, k, v):
        self.cache[k] = v
        if self.mode == StorageModes.PERSISTENT:
            # commit explicitly: autocommit only issues commit(blocking=False),
            # which may not have persisted the data yet
            self.cache.commit()

    def dump(self, k):
        self.cache.pop(k)
        if self.mode == StorageModes.PERSISTENT:
            # commit explicitly: autocommit only issues commit(blocking=False),
            # which may not have persisted the data yet
            self.cache.commit()

    def get(self, k):
        return self.cache.get(k)

    def append(self, k, v):
        current_data = self.cache.get(k)
        if not current_data:
            self.set(k, [v])
        else:
            if not isinstance(current_data, list):
                current_data = [current_data]
            current_data.append(v)
            self.set(k, current_data)
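
The class above references StorageModes and SqliteDict without showing the surrounding imports. A minimal sketch of the assumed supporting definitions (the enum name and members are taken from the code above; the concrete values and module layout are assumptions):

# Hypothetical supporting definitions for Example #1 (not part of the original source)
from enum import Enum

from sqlitedict import SqliteDict  # pip install sqlitedict


class StorageModes(Enum):
    MEMORY = 'memory'          # plain in-process dict, data is lost on exit
    PERSISTENT = 'persistent'  # SqliteDict backed by ../my_db.sqlite


# usage sketch
# storage = Storage(mode=StorageModes.PERSISTENT)
# storage.append('jobs', 'job-1')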
Example #2
class GoogleDrive:
    auth_url = 'https://accounts.google.com/o/oauth2/v2/auth'
    token_url = 'https://www.googleapis.com/oauth2/v4/token'
    api_url = 'https://www.googleapis.com/drive/'
    redirect_url = 'urn:ietf:wg:oauth:2.0:oob'
    scopes = ['https://www.googleapis.com/auth/drive.readonly']

    def __init__(self, config, client_id: str, client_secret: str, token_path: str, cache_path: str):
        self.cfg = config
        self.client_id = client_id
        self.client_secret = client_secret
        self.token_path = token_path
        self.cache_path = cache_path
        self.cache = SqliteDict(self.cache_path, tablename='cache', encode=json.dumps, decode=json.loads,
                                autocommit=False)
        self.transcodes_cache = ExpiringDict(max_len=5000, max_age_seconds=2 * (60 * 60))
        self.token = self._load_token()
        self.token_refresh_lock = Lock()
        self.http = self._new_http_object()

    ############################################################
    # CORE CLASS METHODS
    ############################################################

    def get_auth_link(self):
        auth_url, state = self.http.authorization_url(self.auth_url, access_type='offline', prompt='select_account')
        return auth_url

    def exchange_code(self, code: str):
        token = self.http.fetch_token(self.token_url, code=code, client_secret=self.client_secret)
        if 'access_token' in token:
            self._token_saver(token)
        return self.token

    def query(self, path: str, method: str = 'GET', fetch_all_pages: bool = False, callbacks={}, **kwargs):
        resp: Response = None
        pages: int = 1
        resp_json = {}
        request_url = self.api_url + path.lstrip('/') if not path.startswith('http') else path

        try:
            while True:
                resp = self._do_query(request_url, method, **kwargs)
                log.debug(f"Request URL: {resp.url}")
                log.debug(f"Request ARG: {kwargs}")
                log.debug(f'Response Status: {resp.status_code} {resp.reason}')

                if 'stream' in kwargs and kwargs['stream']:
                    return True, resp, None

                if 'Content-Type' in resp.headers and 'json' in resp.headers['Content-Type']:
                    if fetch_all_pages:
                        resp_json.pop('nextPageToken', None)
                    new_json = resp.json()
                    # does this page have changes
                    extended_changes = False
                    changes = []
                    if 'changes' in new_json:
                        if 'changes' in resp_json:
                            changes.extend(resp_json['changes'])
                        changes.extend(new_json['changes'])
                        extended_changes = True

                    resp_json.update(new_json)
                    if extended_changes:
                        resp_json['changes'] = changes
                else:
                    return resp.status_code == 200, resp, resp.text

                # call page_token_callback to update cached page_token, if specified
                if 'page_token_callback' in callbacks:
                    if 'nextPageToken' in resp_json:
                        callbacks['page_token_callback'](resp_json['nextPageToken'])
                    elif 'newStartPageToken' in resp_json:
                        callbacks['page_token_callback'](resp_json['newStartPageToken'])

                # call data_callback, fetch_all_pages is true
                if fetch_all_pages and 'data_callback' in callbacks:
                    callbacks['data_callback'](resp.json(), callbacks)

                # handle nextPageToken
                if fetch_all_pages and 'nextPageToken' in resp_json and resp_json['nextPageToken']:
                    # there are more pages
                    pages += 1
                    log.info("Fetching extra results from page %d", pages)
                    if 'params' in kwargs:
                        kwargs['params'].update({'pageToken': resp_json['nextPageToken']})
                    elif 'json' in kwargs:
                        kwargs['json'].update({'pageToken': resp_json['nextPageToken']})
                    elif 'data' in kwargs:
                        kwargs['data'].update({'pageToken': resp_json['nextPageToken']})
                    continue

                break

            return bool(resp_json), resp, resp_json if resp_json else resp.text

        except Exception:
            log.exception(f"Exception sending request to {request_url} with kwargs={kwargs}: ")
            return False, resp, None

    ############################################################
    # DRIVE FUNCTIONS
    ############################################################

    def validate_access_token(self):
        success, resp, data = self.query('/v3/changes/startPageToken',
                                         params={'supportsTeamDrives': self.cfg.google.teamdrive})
        if success and resp.status_code == 200:
            if 'startPageToken' not in data:
                log.error("Failed validate up to date access_token:\n\n%s\n", data)
                return False
            return True
        else:
            log.error("Error validating access token, status_code = %d, data =\n\n%s\n",
                      resp.status_code if resp is not None else 0, data)
        return False

    def get_changes(self, new_items_callback=None, removed_items_callback=None):
        callbacks = {'page_token_callback': self._page_token_saver,
                     'data_callback': self._process_changes}

        if new_items_callback:
            callbacks['new_items_callback'] = new_items_callback
        if removed_items_callback:
            callbacks['removed_items_callback'] = removed_items_callback

        success, resp, data = self.query('/v3/changes', params={
            'pageToken': self.token['page_token'] if 'page_token' in self.token else '1', 'pageSize': 1000,
            'includeRemoved': True,
            'includeTeamDriveItems': self.cfg.google.teamdrive,
            'supportsTeamDrives': self.cfg.google.teamdrive,
            'fields': 'changes(file(md5Checksum,mimeType,modifiedTime,'
                      'name,parents,teamDriveId,trashed),'
                      'fileId,removed,teamDrive(id,name),'
                      'teamDriveId),newStartPageToken,nextPageToken'}, fetch_all_pages=True,
                                         callbacks=callbacks)
        return

    def get_file(self, file_id, stream=True, headers=None, timeout=30):
        req_url = '/v2/files/%s' % file_id if not file_id.startswith('http') else file_id
        success, resp, data = self.query(req_url, params={
            'includeTeamDriveItems': self.cfg.google.teamdrive,
            'supportsTeamDrives': self.cfg.google.teamdrive,
            'alt': 'media'
        }, stream=stream, headers=headers, timeout=timeout)
        return resp

    def get_stream_link(self, file_id):
        # validate / refresh current access_token
        if not self.validate_access_token():
            return ''
        log.debug("Validated access_token is current")

        # generate url
        req = Request('GET', f'{self.api_url.rstrip("/")}/v2/files/{file_id}',
                      params={'includeTeamDriveItems': self.cfg.google.teamdrive,
                              'supportsTeamDrives': self.cfg.google.teamdrive,
                              'alt': 'media',
                              'access_token': self.token['access_token']}).prepare()
        log.debug(f'Direct Stream URL: {req.url}')
        return req.url

    def get_transcodes(self, file_id):
        # do we have the transcoded versions already cached?
        cached_transcodes = self.transcodes_cache.get(file_id, None)
        if cached_transcodes is not None and len(cached_transcodes):
            log.debug(f"Loaded {len(cached_transcodes)} transcode streams from temporary cache for: {file_id}")
            return cached_transcodes

        # retrieve transcoded versions from google docs
        success, resp, data = self.query(f'https://docs.google.com/get_video_info?docid={file_id}')
        if not success or (not data or 'fmt_stream_map' not in data or 'fmt_list' not in data):
            log.error(f"Failed to find transcoded versions data for: {file_id}")
            return None

        # parse main response
        tmp = parse_qs(data)
        tmp_versions = tmp['fmt_list'][0]
        tmp_stream_map = tmp['fmt_stream_map'][0]
        drive_stream_cookie = resp.cookies.get('DRIVE_STREAM', '')

        # parse required variables
        transcode_versions = {}
        transcode_streams = {}

        # parse version list
        for version in tmp_versions.split(','):
            tmp_v = version.split('/')
            transcode_versions[tmp_v[0]] = tmp_v[1].split('x')[1]

        if not len(transcode_versions):
            log.error(f"Failed to parse transcoded versions (fmt_list) for: {file_id}")
            return None

        # parse transcode lists
        for stream in tmp_stream_map.split(','):
            tmp_s = stream.split('|')
            transcode_streams[transcode_versions[tmp_s[0]]] = tmp_s[1]

        if not len(transcode_streams):
            log.error(f"Failed to parse transcoded streams (fmt_stream_map) for: {file_id}")
            return None

        # cache the transcode streams in the temporary cache
        self.transcodes_cache[file_id] = transcode_streams
        log.debug(f"Added {len(transcode_streams)} transcode streams to temporary cache for: {file_id}")
        return transcode_streams

    ############################################################
    # CACHE
    ############################################################

    def get_id_metadata(self, item_id, teamdrive_id=None):
        # return cache from metadata if available
        cached_metadata = self._get_cached_metdata(item_id)
        if cached_metadata:
            return True, cached_metadata

        # does item_id match teamdrive_id?
        if teamdrive_id is not None and item_id == teamdrive_id:
            success, resp, data = self.query('v3/teamdrives/%s' % str(item_id))
            if success and resp.status_code == 200 and 'name' in data:
                # we successfully retrieved this teamdrive info, let's place a mimeType key in the result
                # so we know it needs to be cached
                data['mimeType'] = 'application/vnd.google-apps.folder'
        else:
            # retrieve file metadata
            success, resp, data = self.query('v3/files/%s' % str(item_id),
                                             params={
                                                 'supportsTeamDrives': self.cfg.google.teamdrive,
                                                 'fields': 'id,md5Checksum,mimeType,modifiedTime,name,parents,'
                                                           'trashed,teamDriveId'})
        if success and resp.status_code == 200:
            return True, data
        else:
            log.error("Error retrieving metadata for item %r:\n\n%s\n", item_id, data)
            return False, data

    def get_id_file_paths(self, item_id, teamdrive_id=None):
        file_paths = []
        added_to_cache = 0

        try:
            def get_item_paths(obj_id, path, paths, new_cache_entries, teamdrive_id=None):
                success, obj = self.get_id_metadata(obj_id, teamdrive_id)
                if not success:
                    return new_cache_entries

                teamdrive_id = teamdrive_id if 'teamDriveId' not in obj else obj['teamDriveId']

                # add item object to cache if we know it's not from the cache
                if 'mimeType' in obj:
                    # we know this is a new item fetched from the api, because the cache does not store this field
                    self.add_item_to_cache(obj['id'], obj['name'], [] if 'parents' not in obj else obj['parents'])
                    new_cache_entries += 1

                if path.strip() == '':
                    path = obj['name']
                else:
                    path = os.path.join(obj['name'], path)

                if 'parents' in obj and obj['parents']:
                    for parent in obj['parents']:
                        new_cache_entries += get_item_paths(parent, path, paths, new_cache_entries, teamdrive_id)

                if (not obj or 'parents' not in obj or not obj['parents']) and len(path):
                    paths.append(path)
                    return new_cache_entries
                return new_cache_entries

            added_to_cache += get_item_paths(item_id, '', file_paths, added_to_cache, teamdrive_id)
            if added_to_cache:
                log.debug("Dumping cache due to new entries!")
                self._dump_cache()

            if len(file_paths):
                return True, file_paths
            else:
                return False, file_paths

        except Exception:
            log.exception("Exception retrieving filepaths for '%s': ", item_id)

        return False, []

    def add_item_to_cache(self, item_id, item_name, item_parents):
        if item_id not in self.cache:
            log.debug("Added '%s' to cache: %s", item_id, item_name)
        self.cache[item_id] = {'name': item_name, 'parents': item_parents}
        return

    def remove_item_from_cache(self, item_id):
        if self.cache.pop(item_id, None):
            return True
        return False

    def get_item_name_from_cache(self, item_id):
        try:
            item = self.cache.get(item_id)
            return item['name'] if isinstance(item, dict) else 'Unknown'
        except Exception:
            pass
        return 'Unknown'

    def get_item_from_cache(self, item_id):
        try:
            item = self.cache.get(item_id, None)
            return item
        except Exception:
            pass
        return None

    ############################################################
    # INTERNALS
    ############################################################

    def _do_query(self, request_url: str, method: str, **kwargs):
        tries: int = 0
        max_tries: int = 2
        lock_acquirer: bool = False
        resp: Response = None
        use_timeout: int = 30

        # override default timeout
        if 'timeout' in kwargs and isinstance(kwargs['timeout'], int):
            use_timeout = kwargs['timeout']
            kwargs.pop('timeout', None)

        # remove un-needed kwargs
        kwargs.pop('fetch_all_pages', None)
        kwargs.pop('page_token_callback', None)

        # do query
        while tries < max_tries:
            if self.token_refresh_lock.locked() and not lock_acquirer:
                log.debug("Token refresh lock is currently acquired... trying again in 500ms")
                time.sleep(0.5)
                continue

            if method == 'POST':
                resp = self.http.post(request_url, timeout=use_timeout, **kwargs)
            elif method == 'PATCH':
                resp = self.http.patch(request_url, timeout=use_timeout, **kwargs)
            elif method == 'DELETE':
                resp = self.http.delete(request_url, timeout=use_timeout, **kwargs)
            else:
                resp = self.http.get(request_url, timeout=use_timeout, **kwargs)
            tries += 1

            if resp.status_code == 401 and tries < max_tries:
                # unauthorized error, lets refresh token and retry
                self.token_refresh_lock.acquire(False)
                lock_acquirer = True
                log.warning(f"Unauthorized Response (Attempts {tries}/{max_tries})")
                self.token['expires_at'] = time.time() - 10
                self.http = self._new_http_object()
            else:
                break

        return resp

    def _load_token(self):
        try:
            if not os.path.exists(self.token_path):
                return {}

            with open(self.token_path, 'r') as fp:
                return json.load(fp)
        except Exception:
            log.exception(f"Exception loading token from {self.token_path}: ")
        return {}

    def _dump_token(self):
        try:
            with open(self.token_path, 'w') as fp:
                json.dump(self.token, fp, indent=2)
            return True
        except Exception:
            log.exception(f"Exception dumping token to {self.token_path}: ")
        return False

    def _token_saver(self, token: dict):
        # update internal token dict
        self.token.update(token)
        try:
            if self.token_refresh_lock.locked():
                self.token_refresh_lock.release()
        except Exception:
            log.exception("Exception releasing token_refresh_lock: ")
        self._dump_token()
        log.info("Renewed access token!")
        return

    def _page_token_saver(self, page_token: str):
        # update internal token dict
        self.token['page_token'] = page_token
        self._dump_token()
        return

    def _new_http_object(self):
        return OAuth2Session(client_id=self.client_id, redirect_uri=self.redirect_url, scope=self.scopes,
                             auto_refresh_url=self.token_url, auto_refresh_kwargs={'client_id': self.client_id,
                                                                                   'client_secret': self.client_secret},
                             token_updater=self._token_saver, token=self.token)

    def _get_cached_metdata(self, item_id):
        if item_id in self.cache:
            return self.cache[item_id]
        return None

    def _dump_cache(self):
        self.cache.commit()
        return

    def _remove_unwanted_paths(self, paths_list: list, mime_type: str):
        # remove paths that were not allowed - this is always enabled
        for item_path in copy(paths_list):
            allowed_path = False
            for allowed_file_path in self.cfg.google.allowed.file_paths:
                if item_path.lower().startswith(allowed_file_path.lower()):
                    allowed_path = True
                    break
            if not allowed_path:
                log.debug("Ignoring %r because its not an allowed path", item_path)
                paths_list.remove(item_path)
                continue

        # remove unallowed extensions
        if self.cfg.google.allowed.file_extensions:
            for item_path in copy(paths_list):
                allowed_file = False
                for allowed_extension in self.cfg.google.allowed.file_extensions_list:
                    if item_path.lower().endswith(allowed_extension.lower()):
                        allowed_file = True
                        break
                if not allowed_file:
                    log.debug("Ignoring %r because it was not an allowed extension", item_path)
                    paths_list.remove(item_path)

        # remove unallowed mimes
        if self.cfg.google.allowed.mime_types:
            allowed_file = False
            for allowed_mime in self.cfg.google.allowed.mime_types_list:
                if allowed_mime.lower() in mime_type.lower():
                    if 'video' in mime_type.lower():
                        # we want to validate this is not a .sub file, which for some reason, google shows as video/MP2G
                        double_checked_allowed = True
                        for item_path in paths_list:
                            if item_path.lower().endswith('.sub'):
                                double_checked_allowed = False
                        if double_checked_allowed:
                            allowed_file = True
                            break
                    else:
                        allowed_file = True
                        break

            if not allowed_file:
                log.debug("Ignoring %s because it was not an allowed mime: %s", paths_list, mime_type)
                for item_path in copy(paths_list):
                    paths_list.remove(item_path)

    def _process_changes(self, data: dict, callbacks: dict = {}):
        removed_file_paths = {}
        added_file_paths = {}
        if not data or 'changes' not in data:
            log.error("There were no changes to process")
            return
        log.info("Processing %d changes", len(data['changes']))

        # process changes
        for change in data['changes']:
            if 'file' in change and 'fileId' in change:
                # don't consider trashed/removed events for processing
                if ('trashed' in change['file'] and change['file']['trashed']) or (
                        'removed' in change and change['removed']):
                    # store the removed file paths - only if we have this item cached, otherwise we are not interested
                    # as we would not have stored it anyway...
                    item_exists = self.get_item_from_cache(change['fileId'])
                    if item_exists is not None:
                        success, item_paths = self.get_id_file_paths(
                            change['fileId'],
                            change['file']['teamDriveId'] if 'teamDriveId' in change['file'] else None)
                        self._remove_unwanted_paths(
                            item_paths,
                            change['file']['mimeType'] if 'mimeType' in change['file'] else 'Unknown')
                        if success and len(item_paths):
                            if change['fileId'] in removed_file_paths:
                                removed_file_paths[change['fileId']].extend(item_paths)
                            else:
                                removed_file_paths[change['fileId']] = item_paths

                    # remove item from cache
                    if self.remove_item_from_cache(change['fileId']):
                        log.debug("Removed '%s' from cache: %s", change['fileId'], change['file']['name'])

                    continue

                existing_cache_item = self.get_item_from_cache(change['fileId'])
                if existing_cache_item is not None:
                    existing_success, existing_cache_item_paths = self.get_id_file_paths(
                        change['fileId'],
                        change['file']['teamDriveId'] if 'teamDriveId' in change['file'] else None)
                else:
                    existing_success, existing_cache_item_paths = None, None

                # we always want to add changes to the cache so renames etc can be reflected inside the cache
                self.add_item_to_cache(change['fileId'], change['file']['name'],
                                       [] if 'parents' not in change['file'] else change['file']['parents'])

                # don't process folder events
                if 'mimeType' in change['file'] and 'vnd.google-apps.folder' in change['file']['mimeType']:
                    # ignore this change as we don't want to scan folders
                    continue

                # get this file's paths
                success, item_paths = self.get_id_file_paths(
                    change['fileId'],
                    change['file']['teamDriveId'] if 'teamDriveId' in change['file'] else None)

                # remove unwanted paths
                if existing_success and len(existing_cache_item_paths):
                    self._remove_unwanted_paths(
                        existing_cache_item_paths,
                        change['file']['mimeType'] if 'mimeType' in change['file'] else 'Unknown')
                if success and len(item_paths):
                    self._remove_unwanted_paths(
                        item_paths,
                        change['file']['mimeType'] if 'mimeType' in change['file'] else 'Unknown')

                # was this an existing item?
                if (existing_cache_item is not None and existing_success and len(existing_cache_item_paths)) and (
                        success and len(item_paths)):
                    # this was an existing item, and we are re-processing it again
                    # we need to find the differences between the before and after paths.
                    existing_path_set = set(existing_cache_item_paths)
                    new_path_set = set(item_paths)

                    removed_item_paths = existing_path_set.difference(new_path_set)
                    added_item_paths = new_path_set.difference(existing_path_set)

                    if len(removed_item_paths):
                        if change['fileId'] in removed_file_paths:
                            removed_file_paths[change['fileId']].extend(list(removed_item_paths))
                        else:
                            removed_file_paths[change['fileId']] = list(removed_item_paths)
                    if len(added_item_paths):
                        if change['fileId'] in added_file_paths:
                            added_file_paths[change['fileId']].extend(list(added_item_paths))
                        else:
                            added_file_paths[change['fileId']] = list(added_item_paths)

                elif success and len(item_paths):
                    # these are new paths/files that were not already in the cache
                    if change['fileId'] in added_file_paths:
                        added_file_paths[change['fileId']].extend(item_paths)
                    else:
                        added_file_paths[change['fileId']] = item_paths

            elif 'teamDrive' in change and 'teamDriveId' in change:
                # this is a teamdrive change
                # don't consider trashed/removed events for processing
                if 'removed' in change and change['removed']:
                    # remove item from cache
                    if self.remove_item_from_cache(change['teamDriveId']):
                        log.info("Removed teamDrive '%s' from cache: %s", change['teamDriveId'],
                                 change['teamDrive']['name'] if 'name' in change[
                                     'teamDrive'] else 'Unknown teamDrive')
                    continue

                if 'id' in change['teamDrive'] and 'name' in change['teamDrive']:
                    # we always want to add changes to the cache so renames etc can be reflected inside the cache
                    self.add_item_to_cache(change['teamDrive']['id'], change['teamDrive']['name'], [])
                    continue

        # always dump the cache after running changes
        self._dump_cache()
        log.info('%d added / %d removed', len(added_file_paths), len(removed_file_paths))

        # call further callbacks
        if len(removed_file_paths) and 'removed_items_callback' in callbacks:
            callbacks['removed_items_callback'](removed_file_paths)
        if len(added_file_paths) and 'new_items_callback' in callbacks:
            callbacks['new_items_callback'](added_file_paths)

        return
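
Example #2 opens its metadata cache with autocommit=False and only flushes it through _dump_cache() once a batch of changes has been processed. A minimal standalone sketch of that batched-write pattern (the file name and keys here are illustrative only):

# Sketch: batch writes followed by one explicit commit, as in Example #2's metadata cache
import json

from sqlitedict import SqliteDict

cache = SqliteDict('cache.sqlite', tablename='cache',
                   encode=json.dumps, decode=json.loads, autocommit=False)

for item_id in ('a', 'b', 'c'):
    cache[item_id] = {'name': item_id, 'parents': []}  # staged, not yet flushed

cache.commit()  # persist all staged writes in one transaction
cache.close()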
Example #3
class DropNotClient(Thread):
    def __init__(self, sync_dir, target, port):
        """
        Initialise the DropNot Client
        :param sync_dir: Directory to synchronise on the client machine
        :param target: Target DropNot Server IP/domain
        :param port: Port to access DropNot Server on
        """
        logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
                            filename='client.log',
                            level=logging.INFO)
        self.sync_dir = sync_dir
        self.target = target
        self.port = port
        # Our key-value stores for client metadata
        self.file_db = SqliteDict('client_tracker.db',
                                  tablename='files',
                                  encode=json.dumps,
                                  decode=json.loads)
        self.folder_db = SqliteDict('client_tracker.db',
                                    tablename='folders',
                                    encode=json.dumps,
                                    decode=json.loads)
        Thread.__init__(self)

    def run(self):
        """
        Start the DropNot Client
        :return: None
        """
        # Start a DirectoryListener thread to scan for file changes
        dir_worker = DirectoryListener(dir=self.sync_dir,
                                       change_callback=self.on_change,
                                       file_db=self.file_db,
                                       folder_db=self.folder_db)
        Thread(name='dir_listener',
               target=dir_worker.scan_directory).start()

    def on_change(self, change: ChangeType, path):
        """
        Handle a change in the synced folder. Callback for the DirectoryListener
        :param change: Value of ChangeType defining what was changed
        :param path: Absolute path of changed file/folder
        :return: None
        """
        # Switch windows path separators to forward slashes for rest URL
        rel_path = os.path.relpath(path, self.sync_dir).replace("\\", "/")

        # Handle change in synced folder
        if change == ChangeType.CreatedFolder:
            self.sync_new_folder(path, rel_path)
        elif change == ChangeType.DeletedFolder:
            self.sync_del_folder(path, rel_path)
        elif change == ChangeType.DeletedFile:
            self.sync_del_file(path, rel_path)
        elif change == ChangeType.CreatedFile:
            self.sync_new_file(path, rel_path)
        elif change == ChangeType.ModifiedFile:
            self.sync_edit_file(path, rel_path)
        else:
            logging.critical('Unsupported ChangeType')
            raise RuntimeError('Unsupported ChangeType')

    def sync_new_folder(self, path, rel_path):
        """
        Synchronise creation of a folder to the server
        :param path: Path including sync directory on Client machine
        :param rel_path: Relative path from sync directory
        :return: None
        """
        folder_encoding = FileUtils.get_folder_encoding(path, rel_path)
        resp = requests.post('http://{}:{}/sync/{}'.format(
            self.target, self.port, rel_path),
                             json=repr(folder_encoding))
        if resp.status_code == 200:
            logging.info('Folder synced to remote:{}'.format(rel_path))
            folder_encoding.sync = True
            self.folder_db[path] = repr(folder_encoding)
            self.folder_db.commit()
        elif resp.status_code == 400 or resp.status_code == 422:
            logging.error('Folder sync unsuccessful:{}'.format(rel_path))
            self.folder_db[path] = repr(folder_encoding)
            self.folder_db.commit()
        else:
            logging.critical(
                'Unsupported response status code from remote:{}'.format(
                    resp.status_code))
            raise NotImplementedError('Unsupported resp status code:',
                                      resp.status_code)

    def sync_del_folder(self, path, rel_path):
        """
        Synchronise deletion of a folder to the server
        :param path: Path including sync directory on Client machine
        :param rel_path: Relative path from sync directory
        :return: None
        """
        resp = requests.delete('http://{}:{}/sync/{}'.format(
            self.target, self.port, rel_path),
                               json={"type": "folder"})
        if resp.status_code == 200:
            logging.info('Folder removed from remote:{}'.format(rel_path))
            self.folder_db.pop(path, None)
            self.folder_db.commit()
        elif resp.status_code == 400 or resp.status_code == 422:
            logging.error(
                'Folder removal failed on remote:{}'.format(rel_path))
            # todo: handle this?
        else:
            logging.critical(
                'Unsupported response status code from remote:{}'.format(
                    resp.status_code))
            raise NotImplementedError('Unsupported resp status code:',
                                      resp.status_code)

    def sync_del_file(self, path, rel_path):
        """
        Synchronise deletion of a file to the server
        :param path: Path including sync directory on Client machine
        :param rel_path: Relative path from sync directory
        :return: None
        """
        resp = requests.delete('http://{}:{}/sync/{}'.format(
            self.target, self.port, rel_path),
                               json={"type": "file"})
        if resp.status_code == 200:
            logging.info('File removed from remote:{}'.format(rel_path))
            self.file_db.pop(path, None)
            self.file_db.commit()
        elif resp.status_code == 400 or resp.status_code == 422:
            logging.error('File removal failed on remote:{}'.format(rel_path))
            # todo: handle this?
        else:
            logging.critical(
                'Unsupported response status code from remote:{}'.format(
                    resp.status_code))
            raise NotImplementedError('Unsupported resp status code:',
                                      resp.status_code)

    def sync_new_file(self, path, rel_path):
        """
        Synchronise a new file to the server
        :param path: Path including sync directory on Client machine
        :param rel_path: Relative path from sync directory
        :return: None
        """
        file_encoding = FileUtils.get_file_encoding(path, rel_path)
        file_metadata = FileUtils.get_metadata(file_encoding)

        if self.check_file_exists(file_metadata.md5):
            # Only send metadata, not the binary
            resp = requests.post('http://{}:{}/sync/{}'.format(
                self.target, self.port, rel_path),
                                 json=repr(file_metadata))
        else:
            # Send metadata and binary
            resp = requests.post('http://{}:{}/sync/{}'.format(
                self.target, self.port, rel_path),
                                 json=repr(file_encoding))

        if resp.status_code == 200:
            logging.info('File creation synced to remote:{}'.format(rel_path))
            file_metadata.sync = True
            self.file_db[path] = repr(file_metadata)
            self.file_db.commit()
        elif resp.status_code == 400 or resp.status_code == 422:
            logging.error(
                'File creation sync unsuccessful:{}'.format(rel_path))
            self.file_db[path] = repr(file_metadata)
            self.file_db.commit()
            # todo: handle this?
        else:
            logging.critical(
                'Unsupported response status code from remote:{}'.format(
                    resp.status_code))
            raise NotImplementedError('Unsupported resp status code:',
                                      resp.status_code)

    def sync_edit_file(self, path, rel_path):
        """
        Synchronise a file edit to the server
        :param path: Path including sync directory on Client machine
        :param rel_path: Relative path from sync directory
        :return: None
        """
        file_encoding = FileUtils.get_file_encoding(path, rel_path)
        file_metadata = FileUtils.get_metadata(file_encoding)

        if self.check_file_exists(file_metadata.md5):
            # Only send metadata, not the binary
            resp = requests.put('http://{}:{}/sync/{}'.format(
                self.target, self.port, rel_path),
                                json=repr(file_metadata))
        else:
            # Send metadata and binary
            resp = requests.put('http://{}:{}/sync/{}'.format(
                self.target, self.port, rel_path),
                                json=repr(file_encoding))

        if resp.status_code == 200:
            logging.info(
                'File modification synced to remote:{}'.format(rel_path))
            file_metadata.sync = True
            self.file_db[path] = repr(file_metadata)
            self.file_db.commit()
        elif resp.status_code == 400 or resp.status_code == 422:
            logging.info(
                'File modification sync unsuccessful:{}'.format(rel_path))
            self.file_db[path] = repr(file_metadata)
            self.file_db.commit()
            # todo: handle this?
        else:
            logging.critical(
                'Unsupported response status code from remote:{}'.format(
                    resp.status_code))
            raise NotImplementedError('Unsupported resp status code:',
                                      resp.status_code)

    def check_file_exists(self, md5):
        """
        Check whether a file matching the md5 already exists on the remote
        :param md5: md5 hash
        :return: True if an identical file exists, False otherwise
        """
        resp = requests.get('http://{}:{}/sync/exists/{}'.format(
            self.target, self.port, md5))
        if resp.status_code == 200:
            return True
        else:
            return False
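
Example #3 keeps two independent key-value stores, 'files' and 'folders', in the same client_tracker.db file by giving each SqliteDict a different tablename. A minimal sketch of that layout (paths and values are illustrative only):

# Sketch: two tables sharing one SqliteDict database file, as in Example #3
import json

from sqlitedict import SqliteDict

files = SqliteDict('client_tracker.db', tablename='files',
                   encode=json.dumps, decode=json.loads)
folders = SqliteDict('client_tracker.db', tablename='folders',
                     encode=json.dumps, decode=json.loads)

folders['photos'] = {'sync': True}
files['photos/cat.jpg'] = {'md5': 'abc123', 'sync': False}

folders.commit()
files.commit()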
Example #4
class Gdrive:
    def __init__(self, config, token_path, cache_path):
        self.cfg = config
        self.token_path = token_path
        self.cache_path = cache_path
        self.token = None
        self.cache = None

    def first_run(self):
        # token file
        if not os.path.exists(self.token_path):
            # token.json does not exist, lets do the first run auth process
            print(
                "Visit %s and authorize against the account you wish to use" %
                self.authorize_url())
            auth_code = raw_input('Enter authorization code: ')
            if self.first_access_token(auth_code) and self.token is not None:
                self.dump_token()
            else:
                logger.error(
                    "Failed to authorize with the supplied client_id/client_secret/auth_code..."
                )
                return False
        else:
            self.token = utils.load_json(self.token_path)

        # cache file
        self.cache = SqliteDict(self.cache_path,
                                tablename='cache',
                                encode=json.dumps,
                                decode=json.loads,
                                autocommit=False)
        return True

    def authorize_url(self):
        payload = {
            'client_id': self.cfg['GDRIVE']['CLIENT_ID'],
            'redirect_uri': 'urn:ietf:wg:oauth:2.0:oob',
            'response_type': 'code',
            'access_type': 'offline',
            'scope': 'https://www.googleapis.com/auth/drive'
        }
        url = 'https://accounts.google.com/o/oauth2/v2/auth?' + urlencode(
            payload)
        return url

    def first_access_token(self, auth_code):
        logger.info("Requesting access token for auth code %r", auth_code)
        payload = {
            'code': auth_code,
            'client_id': self.cfg['GDRIVE']['CLIENT_ID'],
            'client_secret': self.cfg['GDRIVE']['CLIENT_SECRET'],
            'grant_type': 'authorization_code',
            'redirect_uri': 'urn:ietf:wg:oauth:2.0:oob',
        }
        success, resp, data = self._make_request(
            'https://www.googleapis.com/oauth2/v4/token',
            data=payload,
            headers={},
            request_type='post')
        if success and resp.status_code == 200:
            logger.info("Retrieved first access token!")
            self.token = data
            self.token['page_token'] = ''
            return True
        else:
            logger.error("Error retrieving first access_token:\n%s", data)
            return False

    def refresh_access_token(self):
        logger.debug("Renewing access token...")
        payload = {
            'refresh_token': self.token['refresh_token'],
            'client_id': self.cfg['GDRIVE']['CLIENT_ID'],
            'client_secret': self.cfg['GDRIVE']['CLIENT_SECRET'],
            'grant_type': 'refresh_token',
        }
        success, resp, data = self._make_request(
            'https://www.googleapis.com/oauth2/v4/token',
            data=payload,
            headers={},
            request_type='post')
        if success and resp.status_code == 200 and 'access_token' in data:
            logger.info("Renewed access token!")

            refresh_token = self.token['refresh_token']
            page_token = self.token['page_token']
            self.token = data
            if 'refresh_token' not in self.token or not self.token[
                    'refresh_token']:
                self.token['refresh_token'] = refresh_token
            self.token['page_token'] = page_token
            self.dump_token()
            return True
        else:
            logger.error("Error renewing access token:\n%s", data)
            return False

    def get_changes_first_page_token(self):
        success, resp, data = self._make_request(
            'https://www.googleapis.com/drive/v3/changes/startPageToken',
            params={'supportsTeamDrives': self.cfg['GDRIVE']['TEAMDRIVE']})
        if success and resp.status_code == 200:
            if 'startPageToken' not in data:
                logger.error(
                    "Failed to retrieve startPageToken from returned startPageToken:\n%s",
                    data)
                return False
            self.token['page_token'] = data['startPageToken']
            self.dump_token()
            return True
        else:
            logger.error("Error retrieving first page token:\n%s", data)
            return False

    def get_changes(self):
        success, resp, data = self._make_request(
            'https://www.googleapis.com/drive/v3/changes',
            params={
                'pageToken':
                self.token['page_token'],
                'pageSize':
                1000,
                'includeRemoved':
                True,
                'includeTeamDriveItems':
                self.cfg['GDRIVE']['TEAMDRIVE'],
                'supportsTeamDrives':
                self.cfg['GDRIVE']['TEAMDRIVE'],
                'fields':
                'changes(file(md5Checksum,mimeType,modifiedTime,'
                'name,parents,teamDriveId,trashed),'
                'fileId,removed,teamDrive(id,name),'
                'teamDriveId),newStartPageToken,nextPageToken'
            })
        if success and resp.status_code == 200:
            # page token logic
            if data is not None and 'nextPageToken' in data:
                self.token['page_token'] = data['nextPageToken']
                self.dump_token()
            elif data is not None and 'newStartPageToken' in data:
                self.token['page_token'] = data['newStartPageToken']
                self.dump_token()
            else:
                logger.error(
                    "Unexpected response while polling for changes from page %s:\n%s",
                    str(self.token['page_token']), data)
                return False, data
            return True, data
        else:
            logger.error("Error getting page changes for page_token %r:\n%s",
                         self.token['page_token'], data)
            return False, data

    def get_id_metadata(self, item_id, teamdrive_id=None):
        # return cache from metadata if available
        cached_metadata = self._get_cached_metdata(item_id)
        if cached_metadata:
            return True, cached_metadata

        # does item_id match teamdrive_id?
        if teamdrive_id is not None and item_id == teamdrive_id:
            success, resp, data = self._make_request(
                'https://www.googleapis.com/drive/v3/teamdrives/%s' %
                str(item_id))
            if success and resp.status_code == 200 and 'name' in data:
                # we successfully retrieved this teamdrive info, let's place a mimeType key in the result
                # so we know it needs to be cached
                data['mimeType'] = 'application/vnd.google-apps.folder'
        else:
            # retrieve file metadata
            success, resp, data = self._make_request(
                'https://www.googleapis.com/drive/v3/files/%s' % str(item_id),
                params={
                    'supportsTeamDrives':
                    self.cfg['GDRIVE']['TEAMDRIVE'],
                    'fields':
                    'id,md5Checksum,mimeType,modifiedTime,name,parents,'
                    'trashed,teamDriveId'
                })
        if success and resp.status_code == 200:
            return True, data
        else:
            logger.error("Error retrieving metadata for item %r:\n%s", item_id,
                         data)
            return False, data

    def get_id_file_paths(self, item_id, teamdrive_id=None):
        file_paths = []
        added_to_cache = 0

        try:

            def get_item_paths(obj_id,
                               path,
                               paths,
                               new_cache_entries,
                               teamdrive_id=None):
                success, obj = self.get_id_metadata(obj_id, teamdrive_id)
                if not success:
                    return new_cache_entries

                teamdrive_id = teamdrive_id if 'teamDriveId' not in obj else obj[
                    'teamDriveId']

                # add item object to cache if we know it's not from the cache
                if 'mimeType' in obj:
                    # we know this is a new item fetched from the api, because the cache does not store this field
                    self.add_item_to_cache(
                        obj['id'], obj['name'],
                        [] if 'parents' not in obj else obj['parents'])
                    new_cache_entries += 1

                if path.strip() == '':
                    path = obj['name']
                else:
                    path = os.path.join(obj['name'], path)

                if 'parents' in obj and obj['parents']:
                    for parent in obj['parents']:
                        new_cache_entries += get_item_paths(
                            parent, path, paths, new_cache_entries,
                            teamdrive_id)

                if (not obj or 'parents' not in obj
                        or not obj['parents']) and len(path):
                    paths.append(path)
                    return new_cache_entries
                return new_cache_entries

            added_to_cache += get_item_paths(item_id, '', file_paths,
                                             added_to_cache, teamdrive_id)
            if added_to_cache:
                logger.debug("Dumping cache due to new entries!")
                self.dump_cache()

            if len(file_paths):
                return True, file_paths
            else:
                return False, file_paths

        except Exception:
            logger.exception("Exception retrieving filepaths for '%s': ",
                             item_id)

        return False, []

    # cache
    def add_item_to_cache(self, item_id, item_name, item_parents):
        if item_id not in self.cache:
            logger.info("Added '%s' to cache: %s", item_id, item_name)
        self.cache[item_id] = {'name': item_name, 'parents': item_parents}
        return

    def remove_item_from_cache(self, item_id):
        if self.cache.pop(item_id, None):
            return True
        return False

    # dump jsons
    def dump_token(self):
        utils.dump_json(self.token_path, self.token)
        return

    def dump_cache(self):
        self.cache.commit()
        return

    ############################################################
    # INTERNALS
    ############################################################

    # cache
    def _get_cached_metdata(self, item_id):
        if item_id in self.cache:
            return self.cache[item_id]
        return None

    # requests
    @backoff.on_predicate(backoff.expo,
                          lambda x: not x[0] and
                          ('error' in x[2] and 'code' in x[2]['error'] and x[2]
                           ['error']['code'] != 401),
                          max_tries=8)
    def _make_request(self,
                      url,
                      headers=None,
                      data=None,
                      params=None,
                      request_type='get'):
        refreshed_token = False

        while True:
            if headers is None and self.token:
                auth_headers = {
                    'Authorization': 'Bearer %s' % self.token['access_token'],
                }
            else:
                auth_headers = {}

            resp = None
            if request_type == 'get':
                resp = requests.get(
                    url,
                    params=params,
                    headers=headers if headers is not None else auth_headers,
                    timeout=30)
            elif request_type == 'post':
                resp = requests.post(
                    url,
                    data=data,
                    headers=headers if headers is not None else auth_headers,
                    timeout=30)
            else:
                return False, resp, {
                    'error': {
                        'code':
                        401,
                        'message':
                        'Invalid request_type was supplied to _make_request'
                    }
                }

            # response logic
            try:
                data = resp.json()
            except ValueError:
                logger.exception(
                    "Exception while decoding response from Google Drive for data:\n%s\nTraceback: ",
                    resp.text)
                return False, resp, {
                    'error': {
                        'code': resp.status_code,
                        'message':
                        'Failed to json decode Google Drive response'
                    }
                }

            if 'error' in data and 'code' in data['error'] and (
                    'message' in data['error']
                    and 'Invalid Credentials' in data['error']['message']):
                # the token has expired.
                if not refreshed_token:
                    refreshed_token = True
                    self.refresh_access_token()
                    continue
                else:
                    # attempt was already made to refresh token
                    return False, resp, data

            if resp.status_code == 200:
                return True, resp, data
            else:
                return False, resp, data
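
Example #4 wraps _make_request in backoff.on_predicate, which retries with exponential backoff for as long as the predicate applied to the returned (success, response, data) tuple is true, i.e. while the call failed for any reason other than a 401 credentials error. A minimal sketch of that retry rule against a stand-in target function (the function body is illustrative only):

# Sketch: the retry rule used around _make_request in Example #4
import backoff


@backoff.on_predicate(backoff.expo,
                      lambda result: not result[0] and
                      result[2].get('error', {}).get('code') != 401,
                      max_tries=8)
def flaky_call():
    # stand-in target; the real method returns (success, response, data)
    return False, None, {'error': {'code': 500, 'message': 'backend error'}}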
Example #5
class BucketObject(object):

    def __init__(self, object_id=None, metadata={}, bucket=None):

        self.object_id = object_id

        self._bucket = bucket
        if bucket and isinstance(bucket, Bucket) and bucket.exists():
            self._bucket = bucket
            self._temp_path = os.path.join(self._bucket.bucket_path, 'tmp')
            self._data_path = os.path.join(self._bucket.bucket_path, 'data')
        else:
            raise falcon.HTTPBadRequest(
                title='InvalidBucketName', 
                description= 'The specified bucket is not valid'
            )

        # metadata key used for object identification in the storage
        self._metadata = dict()
        if 'object-key' in self._bucket.metadata and self._bucket.metadata['object-key'] in ['content-md5', 'content-sha1']:
            self.OBJECT_KEY_BASE = self._bucket.metadata['object-key']
        else:
            self.OBJECT_KEY_BASE = 'content-sha1'
        self._objects_metadata = SqliteDict(os.path.join(bucket.bucket_path,'metadata.sqlite'), 'objects', autocommit=True)


    @property
    def metadata(self):

        if self.object_id and self.object_id in self._objects_metadata:
            return dict(self._objects_metadata[self.object_id])
        else:
            return self._metadata
    

    @metadata.setter
    def metadata(self, value):

        if value and isinstance(value, dict):

            # object_id is not set, but the value contains the object key
            if not self.object_id and self.OBJECT_KEY_BASE in value:
                self.object_id = value[self.OBJECT_KEY_BASE]

            # object_id is assigned
            if self.object_id:
                self._objects_metadata[self.object_id] = value
            
            # no object_id, stored temporary
            else:
                self._metadata = value    

        else:
            raise RuntimeError('Incorrect metadata type. Found "%s", expected "dict"' % type(value))

    @property
    def filepath(self):
        ''' returns the path to file in storage
        '''
        if self.object_id:
            return os.path.join(self._data_path, pairtree_path(self.object_id))
        else:
            return os.path.join(self._data_path, pairtree_path(self._metadata[self.OBJECT_KEY_BASE]))


    def exists(self):
        ''' returns True if the object is in the storage
        '''
        if self.object_id and self.object_id in self._objects_metadata:
            return True
        else:
            return False


    def store(self, stream=None):
        ''' store object into bucket
        '''

        # TODO: for later analysis

        # if not stream or not isinstance(stream, gunicorn.http.body.Body):
        #     raise falcon.HTTPInternalServerError(
        #         title = 'InvalidStreamType',
        #         description = 'The Stream type is invalid, %s' % type(stream)
        #     )

        if not stream:
            raise falcon.HTTPInternalServerError(
                title = 'FileEmpty',
                description = 'Attempt to store empty file'
            )

        temp_filepath = None
        with tempfile.NamedTemporaryFile(dir=self._temp_path, mode="wb", delete=False) as _file:
            temp_filepath = _file.name
            while True:
                chunk = stream.read(4096)
                if not chunk:
                    break
                _file.write(chunk)

        hashes = {
            'content-md5': self.filehash(temp_filepath, hashlib.md5()),
            'content-sha1': self.filehash(temp_filepath, hashlib.sha1()),
            'content-sha256': self.filehash(temp_filepath, hashlib.sha256())        
        }

        if self.validate(temp_filepath, hashes):
            self._metadata.update(hashes)

            self.object_id = self._metadata[self.OBJECT_KEY_BASE]

            if hashes[self.OBJECT_KEY_BASE] not in self._objects_metadata:
                movefile(temp_filepath, self.filepath)
                self.metadata = self._metadata
            else:
                os.remove(temp_filepath)
                raise falcon.HTTPConflict(
                    title="ObjectAlreadyExists",
                    description="The object already exists into the storage.",
                    headers={
                        'content-md5':  self.metadata['content-md5'],
                        'content-sha1': self.metadata['content-sha1'],
                        'content-sha256': self.metadata['content-sha256'],
                    }
                )


    def filehash(self, filepath, hashfunc):
        ''' returns file hash
        '''
        block_size = 2 ** 20
        with open(filepath, 'rb') as _file:
            while True:
                data = _file.read(block_size)
                if not data:
                    break
                hashfunc.update(data)
        return hashfunc.hexdigest()


    def validate(self, filepath, filehashes):
        ''' validate the received file object
        '''
        if os.path.getsize(filepath) == 0:
            os.remove(filepath)
            raise falcon.HTTPBadRequest(
                title='ZeroContentLength',
                description='The content size is 0'
            )

        if 'content-length' in self._metadata and \
            os.path.getsize(filepath) != self._metadata['content-length']:
            os.remove(filepath)
            raise falcon.HTTPBadRequest(
                title='BadContentLength',
                description='The Content-length did not match'
            )

        if 'content-md5' in self._metadata and \
            filehashes['content-md5'] != self._metadata['content-md5']:
            os.remove(filepath)
            raise falcon.HTTPBadRequest(
                title='BadDigest',
                description='The Content-MD5 did not match'
            )

        if 'content-sha1' in self._metadata and \
            filehashes['content-sha1'] != self._metadata['content-sha1']:
            os.remove(filepath)
            raise falcon.HTTPBadRequest(
                title='BadDigest',
                description='The Content-SHA1 did not match'
            )

        return True

    def delete(self):
        ''' delete object
        '''
        if self.exists():
            if os.path.exists(self.filepath):
                os.remove(self.filepath)
            else:
                raise falcon.HTTPNotFound()
            self._objects_metadata.pop(self.object_id)


    def info(self):
        ''' returns object's metadata
        '''
        if self.object_id and self.object_id in self._objects_metadata:
            return self._objects_metadata[self.object_id]
        else:
            return self._metadata
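
A minimal usage sketch for the storage class above. Its constructor is not part of this excerpt, so the class name StorageObject and the bucket argument are placeholders; falcon, movefile and pairtree_path come from the surrounding project.

import io

class DummyBucket:
    bucket_path = '/tmp/bucket'                    # where metadata.sqlite and the pairtree live
    metadata = {'object-key': 'content-sha1'}      # hash that is used as the object id

obj = StorageObject(DummyBucket())                 # hypothetical constructor, not shown above
obj.metadata = {'content-length': 11}              # expected size, checked by validate()
obj.store(io.BytesIO(b'hello world'))              # hashed, validated, then moved into the pairtree
print(obj.exists(), obj.info()['content-sha1'])
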
Beispiel #6
class MyForm(QMainWindow):
    def __init__(self):
        super().__init__()
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)

        self.ui.button_load_data.clicked.connect(self.load)
        self.ui.edit_database_path.editingFinished.connect(self.load_from_lineEdit)
        self.ui.button_save.clicked.connect(self.save)
        self.ui.button_add.clicked.connect(self.add_entry)
        self.ui.button_plan.clicked.connect(self.gen_week_table)

    def closeEvent(self,event):
        close = QMessageBox()
        close.setText("Speichern ?")
        close.setStandardButtons(QMessageBox.Yes | QMessageBox.Cancel)
        close = close.exec()
        if close == QMessageBox.Yes:
            self.save()
            event.accept()
        else:
            event.ignore()  # Cancel: keep the window open without saving

    def load(self):
        # Load the SqliteDict database via a file dialog
        try:
            self.fname = QFileDialog.getOpenFileName(self, 'Open file', '', 'Database (*.sqlite)')  # get file path
            self.ui.edit_database_path.setText('Loaded database: ' + self.fname[0])  # show path in the LineEdit
            self.Kochbuch = SqliteDict(self.fname[0], autocommit=True)  # load main dictionary / dishes
            self.create_content_table()
        except Exception:
            print('Cannot load specified file!\nError in main.load()')

    def load_from_lineEdit(self):
        # Load the database from the path typed into the LineEdit
        try:
            path = self.ui.edit_database_path.text()[17:]  # strip the 'Loaded database: ' prefix
            self.fname = (path, '')  # getOpenFileName returns a tuple, so keep the same shape
            self.Kochbuch = SqliteDict(path, autocommit=True)  # load main dictionary / dishes
            self.create_content_table()
        except Exception:
            print('Cannot load specified file!\nError in main.load_from_lineEdit()')

    def save(self):
        # get items from content_table, update Kochbuch and commit it to database
        self.Kochbuch.clear()
        table = self.ui.content_table
        header_items = [table.model().headerData(i, Qt.Horizontal) for i in range(table.model().columnCount())]

        for row_index in range(self.ui.content_table.rowCount()): # Every row is one dish/gericht
            temp_dict = dict()
            for col_index, item in enumerate(header_items):
                temp_dict[item] = table.cellWidget(row_index,col_index).text()
            self.add_gericht(temp_dict)

    def add_entry(self):
        # Add empty entry to table
        row_cnt = self.ui.content_table.rowCount()
        col_cnt = self.ui.content_table.columnCount()
        self.ui.content_table.insertRow(row_cnt)

        for col_index in range(col_cnt):
            self.ui.content_table.setCellWidget(row_cnt, col_index, QLineEdit())
            if col_index == col_cnt - 1:  # Delete Option
                self.ui.content_table.setCellWidget(row_cnt, col_index, QPushButton('Delete'))
                self.ui.content_table.cellWidget(row_cnt, col_index).clicked.connect(self.remove_entry)

    def remove_entry(self):
        table = self.ui.content_table
        # --------------Remove Row------------
        column = table.currentColumn()
        row = table.currentRow()
        table.removeRow(row)
        # -------------Remove dict entry--------
        #name = table.cellWidget(row,0).text()
        #self.del_gericht(name)

    def create_content_table(self):
        # Creates the Widgets inside the Table
        table = self.ui.content_table
        table.setRowCount(len(self.Kochbuch))
        header_items = [table.model().headerData(i, Qt.Horizontal) for i in range(table.model().columnCount())]
        row_label = []
        col_cnt = table.model().columnCount()

        for row_index, val in enumerate(self.Kochbuch.items()):
            #row_label.append(str(row_index + 1) + ' ' + str(val[0]))
            for col_index in range(col_cnt):
                table.setCellWidget(row_index,col_index,QLineEdit())
                if col_index == col_cnt - 1: # Add Delete Button
                    table.setCellWidget(row_index, col_index, QPushButton('Delete'))
                    table.cellWidget(row_index,col_index).clicked.connect(self.remove_entry)
        #self.ui.content_table.setVerticalHeaderLabels(row_label)
        self.set_text_to_table(header_items)

    def set_text_to_table(self,header_items):
        table = self.ui.content_table
        for row_index, val in enumerate(self.Kochbuch.items()):
            table.cellWidget(row_index, 0).setText(val[0]) # Name column/ set Name
            #print(val[1].values())
            for col_index, item in enumerate(header_items[1:]):
                try:
                    table.cellWidget(row_index, col_index + 1).setText(val[1][item])
                except KeyError:
                    if item is None:  # skip so that the Delete button text is not overwritten
                        pass
                    else:
                        # leave unfilled categories empty
                        table.cellWidget(row_index, col_index + 1).setText('')

    def add_gericht(self, entries:dict):
        # Old func args: name: str, Fisch: bool, Nudeln: bool, Vortag: bool, SE: bool, WE: bool, WE_Wichtung: float, Wichtung: float

        # Dishes are stored in a dict()
        # Each dish is categorised by:
        #       - Fisch: Bool (fish)
        #       - Nudeln: Bool (pasta)
        #       - Vortag: Bool (if a lot gets ordered on Sunday, cook the previous day's meal)
        #       - SE: Bool (Sunday dinner)
        #       - WE: Bool (weekend meal, e.g. pick up/order in)
        #       - WE_Wichtung: Float (every dish should have a chance to come up on the weekend; pick up/order in or e.g. Rolladen should be preferred)
        #       - Wichtung: Float (probability of the dish being selected, to avoid repeats) 1.0 = will come up; 0 = will not come up
        #       etc.
        # -------------------------------------------------------------------------------
        # Tortillas = dict() # a single dish dict()
        # Tortillas['Fisch'] = False
        # Tortillas['Nudeln'] = False
        # Tortillas['Vortag'] = False
        # Tortillas['SE'] = False
        # Tortillas['WE'] = False
        # Tortillas['WE_Wichtung'] = 0.1
        # Tortillas['Wichtung'] = 1.0

        # Gerichte['Tortillas'] = Tortillas
        # -------------------------------------------------------------------------------
        '''name_dict = dict()
        name_dict['Fisch'] = Fisch
        name_dict['Nudeln'] = Nudeln
        name_dict['Vortag'] = Vortag
        name_dict['SE'] = SE
        name_dict['WE'] = WE
        name_dict['WE_Wichtung'] = WE_Wichtung
        name_dict['Wichtung'] = Wichtung'''
        name = entries['Name']
        self.Kochbuch[name] = entries

    def del_gericht(self,name):
        self.Kochbuch.pop(name)

    def update_kochbuch(self, Kochbuch: dict, name: str, kategorie: str, value: any):
        # Because this works with a dict inside a dict and the outermost dict is an SqliteDict,
        # entries have to be modified in this slightly roundabout way so changes are written from RAM to the SQLite file
        update = Kochbuch[name]
        update[kategorie] = value
        Kochbuch[name] = update

    def choose(self, dishes):
        choosed_dish = np.asarray(dishes)
        Wichtungen = choosed_dish[:, 1].astype(float)  # np.float is deprecated in newer NumPy versions
        #choosed_dish = np.sort(choosed_dish)

        # Find maxima in the Wichtung column
        max_indizes = np.where(Wichtungen == np.amax(Wichtungen))
        finds = []
        for i in max_indizes[0]:
            finds.append(choosed_dish[i])
        # Choose dish
        # If more than one dish shares the maximum weight, pick one at random
        if len(finds) > 1:
            dish_index = random.randint(0, len(finds) - 1)
            return finds[dish_index]
        else:
            return finds[0]  # single candidate: return the [name, weight] entry, consistent with the branch above
        #print('test','\n',choosed_dish)


    def gen_week_table(self):
        # generate table of dishes day wise
        dishes = [i for i in self.Kochbuch.items()]
        dishes_cnt = len(dishes)

        usable_dishes = []
        possible_dishes_mon = []
        possible_dishes_tue = []
        possible_dishes_wed = []
        possible_dishes_thu = []
        possible_dishes_fri = []
        possible_dishes_sat = []
        possible_dishes_sun = []
        saison = 'Winter'

        for index, dish in enumerate(dishes):
            dish = dish[1]
            # Perform a standard check to filter dishes according to season and weight
            #if float(dish['Wichtung']) > 0.7 and  dish['Saison'] == saison:  # Standard check
            if float(dish['Wichtung']) > 0.7 and (dish['Saison'] == saison or dish['Saison'] == 'None'):
                usable_dishes.append(dish)

                # -----------Monday-------------
                # ------------------------------
                # Mondays should prefer Nudeln ---> bonus for Nudeln == True
                if dish['Fisch'] == 'False':
                    if dish['Nudeln'] == 'True':
                        possible_dishes_mon.append([dish['Name'], float(dish['Wichtung']) + 0.3])
                    else:
                        possible_dishes_mon.append([dish['Name'], float(dish['Wichtung'])])

                # -----------------------------------------------------------------------------

                # -----------Tuesday/Wednesday/Thursday-------------
                # --------------------------------------------------
                # Days without preferences
                if dish['Fisch'] == 'False' and dish['Vortag'] == 'False': # Standard check
                    possible_dishes_tue.append([dish['Name'], float(dish['Wichtung'])])
                    possible_dishes_wed.append([dish['Name'], float(dish['Wichtung'])])
                    possible_dishes_thu.append([dish['Name'], float(dish['Wichtung'])])

                # -----------------------------------------------------------------------------

                # -----------Friday-------------
                # ------------------------------
                # Fish preferred
                if dish['WE'] == 'True' and dish['SE'] == 'False':
                    if dish['Fisch'] == 'True':
                        possible_dishes_fri.append([dish['Name'], float(dish['Wichtung']) + 0.3])
                    else:
                        possible_dishes_fri.append([dish['Name'], float(dish['Wichtung'])])

                # -----------------------------------------------------------------------------

                # -----------Saturday-------------
                # --------------------------------
                # WE category preferred
                if dish['Fisch'] == 'False' and dish['SE'] == 'False' and dish['Vortag'] == 'False':
                    if dish['WE'] == 'True':
                        possible_dishes_sat.append([dish['Name'], float(dish['Wichtung']) + 0.3])
                    else:
                        possible_dishes_sat.append([dish['Name'], float(dish['Wichtung'])])

                # -----------------------------------------------------------------------------

                # -----------Sunday-------------
                # ------------------------------
                # SE highly preferred
                if dish['Fisch'] == 'False' and dish['Vortag'] == 'False':
                    if dish['SE'] == 'True':
                        possible_dishes_sun.append([dish['Name'], float(dish['Wichtung']) + 0.5])
                    else:
                        possible_dishes_sun.append([dish['Name'], float(dish['Wichtung'])])

        print('============================================================================')
        print('=================================Wochenplan=================================')
        print('============================================================================')
        print('Monday:  ', self.choose(possible_dishes_mon)[0])
        print('----------------------------------------------------------------------------')
        print('Tuesday: ', self.choose(possible_dishes_tue)[0])
        print('----------------------------------------------------------------------------')
        print('Wednesday: ', self.choose(possible_dishes_wed)[0])
        print('----------------------------------------------------------------------------')
        print('Thursday: ', self.choose(possible_dishes_thu)[0])
        print('----------------------------------------------------------------------------')
        print('Friday: ', self.choose(possible_dishes_fri)[0])
        print('----------------------------------------------------------------------------')
        print('Saturday: ', self.choose(possible_dishes_sat)[0])
        print('----------------------------------------------------------------------------')
        print('Sunday: ', self.choose(possible_dishes_sun)[0])
        print('============================================================================')


        #print(self.choose(possible_dishes_mon)[0])
        Speiseplan = collections.OrderedDict()
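
A minimal launch sketch for the form above; the PyQt5 import path and the __main__ guard are assumptions, since the snippet does not include its imports or entry point.

import sys
from PyQt5.QtWidgets import QApplication

if __name__ == '__main__':
    app = QApplication(sys.argv)
    form = MyForm()          # wires the buttons to the SqliteDict-backed Kochbuch
    form.show()
    sys.exit(app.exec_())
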
Beispiel #7
class SessionState:
    def __init__(self, cache_file: Path, cache_key: str, redis: str, user_requested=False):
        self.user_requested = user_requested
        self._cache_file = cache_file
        self._cache_key = cache_key
        self._cache: Optional[SqliteDict] = None
        random.seed()
        self._session_key = random.randint(0, 999999)
        self._redis = Redis(host=redis)

        if not user_requested:
            self._open()
            if self._cache_key != self._cache.get("_cache_key_", None):
                self._cache.close()
                self._cache: Optional[SqliteDict] = None
                self._cache_file.unlink()
                self._open()
                self._cache["_cache_key_"] = self._cache_key

        self.session = Session()
        # noinspection PyTypeChecker
        self.session.mount(
            'https://',
            HTTPAdapter(max_retries=Retry(total=3, backoff_factor=0.1, status_forcelist=[500, 502, 503, 504])))
        self.sites = {}
        self.wikidata = Sparql()
        self.primary_site = self.get_site(primary_domain)

    def __enter__(self):
        return self

    def __exit__(self, typ, value, traceback):
        self.session.close()
        if self._cache is not None:
            self._cache.close()
            self._cache = None
            print(f'Closed SQL connection for {self._session_key} at {datetime.utcnow()}')

    def _open(self):
        if self._cache is None:
            print(f'Opening SQL connection for {self._session_key} at {datetime.utcnow()}')
            self._cache_file.parent.mkdir(parents=True, exist_ok=True)
            self._cache = SqliteDict(self._cache_file, autocommit=True)

    def get_site(self, domain: Domain) -> WikiSite:
        try:
            return self.sites[domain]
        except KeyError:
            # noinspection PyTypeChecker
            site = WikiSite(domain, self.session, domain == primary_domain)
            if self.user_requested:
                site.maxlag = None
            self.sites[domain] = site
            return site

    def delete_cached_items(self, prefix: str) -> None:
        self._open()
        for vv in {v for v in self._cache.keys() if v.startswith(prefix)}:
            del self._cache[vv]

    def del_obj(self, key: str) -> Any:
        self._redis.delete(self.redis_key(key))
        self._open()
        print(f"%% del {key}")
        return self._cache.pop(key, None)

    def load_obj(self, key: str, default: Any = None) -> Any:
        value = self._redis.get(self.redis_key(key))
        if value is not None:
            return loads(value)
        self._open()
        print(f"%% load {key}")
        value = self._cache.get(key, default)
        self._redis.set(self.redis_key(key), dumps(value))
        return value

    def save_obj(self, key: str, value: Any):
        self._open()
        print(f"%% save {key}")
        self._cache[key] = value
        self._redis.set(self.redis_key(key), dumps(value))

    def redis_key(self, key: str):
        return self._cache_key + key
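
A minimal usage sketch for SessionState; the cache path, cache key and Redis host are made-up values, and Sparql, WikiSite, primary_domain, dumps and loads come from the surrounding project rather than this excerpt.

from pathlib import Path

with SessionState(Path('/tmp/bot-cache/session.sqlite'), cache_key='v1', redis='localhost') as state:
    state.save_obj('greeting', {'text': 'hello'})  # written to the SqliteDict and mirrored into Redis
    print(state.load_obj('greeting'))              # served from Redis when present, otherwise from SQLite
    state.delete_cached_items('greet')             # drops matching keys from the SQLite cache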