Example #1
    def test_download_file(self):

        # Disabled: this early return skips the entire test body below.
        return

        from gdrivefs.gdtool import drive_proxy
        from gdrivefs.cache.volume import EntryCache  # assumed module path for EntryCache
        http = drive_proxy('get_authed_http')

        normalized_entry = EntryCache.get_instance().cache.get('1DcIWAjj-pnSCXBQa3kHJQuL-QMRoopx8Yx_LVhfRigk')
        mime_type = 'text/plain'

        # Export the cached entry to a local file in the requested format.
        files = drive_proxy('download_to_local', normalized_entry=normalized_entry, mime_type=mime_type)

        # Also disabled: the rest of the test fetches the export URL directly
        # and inspects the response headers.
        return

        from pprint import pprint
        url = files[16].download_links[u'text/plain']
        pprint(url)

        data = http.request(url)
        response_headers = data[0]

        import re
        # httplib2 lower-cases response header names, so match case-insensitively.
        r = re.compile('range', re.IGNORECASE)
        found = [("%s: %s" % (k, v)) for k, v in response_headers.iteritems() if r.match(k)]
        if found:
            print("Found: %s" % (", ".join(found)))

        print(">>>===============================================")
#        print(data[1][:200])
        print("<<<===============================================")
Example #2
    def test_insert_entry(self):

        import datetime

        from gdrivefs.gdtool import drive_proxy
#        filename = ("NewFolder_%s" % (datetime.datetime.now().strftime("%H%M%S")))
#        entry = drive_proxy('create_directory', filename=filename)

        filename = ("NewFile_%s.txt" % (datetime.datetime.now().strftime("%H%M%S")))
        entry = drive_proxy('create_file', filename=filename, data_filepath='/tmp/tmpdata.txt', parents=[])

        print(entry.id)
Example #3
    def test_insert_entry(self):

        import datetime

        from gdrivefs.gdtool import drive_proxy
#        filename = ("NewFolder_%s" % (datetime.datetime.now().strftime("%H%M%S")))
#        entry = drive_proxy('create_directory', filename=filename)

        filename = ("NewFile_%s.txt" % (datetime.datetime.now().strftime("%H%M%S")))
        entry = drive_proxy('create_file', filename=filename, data_filepath='/tmp/tmpdata.txt', parents=[])

        print(entry.id)
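Both create_file examples above assume that /tmp/tmpdata.txt already exists. The sketch below (an illustrative helper, not project code) writes that local payload first and then creates the Drive file through the same drive_proxy call.

# Illustrative helper: prepare the local data file the tests take for
# granted, then create the Drive file with the same 'create_file' call.
import datetime

from gdrivefs.gdtool import drive_proxy

def create_timestamped_file(data_filepath='/tmp/tmpdata.txt'):
    # Placeholder payload; the tests above never show what the file holds.
    with open(data_filepath, 'w') as f:
        f.write('test payload\n')

    filename = "NewFile_%s.txt" % (datetime.datetime.now().strftime("%H%M%S"))
    entry = drive_proxy('create_file',
                        filename=filename,
                        data_filepath=data_filepath,
                        parents=[])
    return entry.id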
Example #4
    def test_list_files_by_parent_id(self):

        # Disabled: this early return skips the test body below.
        return

        from gdrivefs.gdtool import drive_proxy

        entries = drive_proxy('list_files')

        import json

        # Dump each entry's raw metadata as one JSON document per line.
        with open('/tmp/entries', 'w') as f:
            for entry in entries:
                f.write("%s\n" % (json.dumps(entry.info)))
Example #5
    def test_get_parents_containing_id(self):

        # Disabled: this early return skips the test body below.
        return

        import logging

        from gdrivefs.gdtool import drive_proxy

        entry_id = u'11EIs1ZxCykme0FnAdY8Xm_ktUCQ9y5lHC3EwAKFsiFk'

        try:
            parent_ids = drive_proxy('get_parents_containing_id', 
                                     child_id=entry_id)
        except:
            logging.exception("Could not retrieve parents for child with ID "
                              "[%s]." % (entry_id))
            raise

        from pprint import pprint
        pprint(parent_ids)
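The parent IDs returned here can themselves be passed back as child_id, so the same proxy call can be applied repeatedly to collect a file's ancestry. A hedged sketch built only from the call shown above; the helper name, depth limit, and traversal strategy are mine.

# Illustrative helper: walk ancestry upward by re-applying the same
# 'get_parents_containing_id' call. The depth limit is arbitrary and each
# parent is visited only once.
import logging

from gdrivefs.gdtool import drive_proxy

def get_ancestor_ids(entry_id, max_depth=10):
    seen = set()
    frontier = [entry_id]

    for _ in range(max_depth):
        next_frontier = []
        for child_id in frontier:
            try:
                parent_ids = drive_proxy('get_parents_containing_id',
                                         child_id=child_id)
            except Exception:
                logging.exception("Could not retrieve parents for child with "
                                  "ID [%s]." % (child_id))
                raise

            for parent_id in parent_ids:
                if parent_id not in seen:
                    seen.add(parent_id)
                    next_frontier.append(parent_id)

        if not next_frontier:
            break

        frontier = next_frontier

    return seen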
Example #6
    def test_get_changes(self):

        from gdrivefs.change import get_change_manager
        get_change_manager().process_updates()

        # Disabled: the explicit exit below skips the rest of the test.
        import sys
        sys.exit()

        from gdrivefs.gdtool import drive_proxy

        (largest_change_id, next_page_token, changes) = drive_proxy('list_changes')

        print("Largest Change ID: [%s]" % (largest_change_id))
        print("Next Page Token: [%s]" % (next_page_token))

        from pprint import pprint
        pprint(len(changes))
        print

        for change_id, (entry_id, was_deleted, entry) in changes.iteritems():
            print("%d> [%s] D:[%s] [%s]" % (change_id, entry_id, was_deleted, entry.title if entry else '<deleted>'))
Example #7
    def process_updates(self):
        """Process any changes to our files. Return True if everything is up to
        date or False if we need to be run again.
        """

        start_at_id = (self.at_change_id + 1)

        try:
            result = drive_proxy('list_changes', start_change_id=start_at_id)
        except:
            self.__log.exception("Could not retrieve updates. Skipped.")
            return True

        (largest_change_id, next_page_token, changes) = result

        self.__log.debug("The latest reported change-ID is (%d) and we're "
                      "currently at change-ID (%d)." % (largest_change_id, 
                                                        self.at_change_id))

        if largest_change_id == self.at_change_id:
            self.__log.debug("No entries have changed.")
            return True

        self.__log.info("(%d) changes will now be applied." % (len(changes)))

        for change_id, change_tuple in sorted(changes.iteritems()):
            # Apply the changes in ascending change-ID order (oldest to
            # newest); a plain dict does not guarantee that iteration order.

            self.__log.info("Change with ID (%d) will now be applied." %
                         (change_id))

            try:
                self.__apply_change(change_id, change_tuple)
            except:
                self.__log.exception("There was a problem while processing change"
                                  " with ID (%d). No more changes will be "
                                  "applied." % (change_id))
                return False

            self.at_change_id = change_id

        return (next_page_token is None)
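Because process_updates() returns True once everything is up to date and False when another pass is needed, a caller can drive it in a simple loop. A minimal polling sketch built only on the get_change_manager() entry point used above; the helper name and interval are mine.

# Illustrative helper: keep calling process_updates() until it reports that
# we are caught up (True), then sleep before polling again.
import time

from gdrivefs.change import get_change_manager

def poll_for_changes(interval_s=10):
    manager = get_change_manager()
    while True:
        while manager.process_updates() is False:
            # More changes are pending; run another pass immediately.
            pass

        time.sleep(interval_s)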