def test_copyto(stream):
    """Exercise IStream.CopyTo: copy a 2-byte span into a fresh stream.

    Verifies the (read, written) result tuple, the copied payload, and that
    the source stream's position advanced past the copied span.
    """
    stream.Write(b'Hello')
    stream.Seek(2, BOOKMARK_BEGINNING)
    target = IStream()
    copied = stream.CopyTo(target, 2)
    # CopyTo reports bytes read from the source and bytes written to the target
    assert copied == (2, 2)
    target.Seek(0, BOOKMARK_BEGINNING)
    # only two bytes were copied, so a 3-byte read yields just b'll'
    assert target.Read(3) == b'll'
    # a relative seek of 0 reveals the current position: 2 (seek) + 2 (copied)
    assert stream.Seek(0, BOOKMARK_CURRENT) == 4
def state(mapiobj, associated=False):
    """Return the current ICS contents sync state of *mapiobj*, base64-encoded.

    Configures a catch-up export (no importer, no data transferred) so the
    exporter merely advances to the newest change, then dumps its state.
    When *associated* is set, associated (hidden/FAI) messages are included.
    """
    exporter = mapiobj.OpenProperty(PR_CONTENTS_SYNCHRONIZER,
        IID_IExchangeExportChanges, 0, 0)
    flags = SYNC_NORMAL | SYNC_CATCHUP
    if associated:
        flags |= SYNC_ASSOCIATED
    exporter.Config(None, flags, None, None, None, None, 0)
    step = 0
    while True:
        steps, step = exporter.Synchronize(step)
        if steps == step:
            break
    out = IStream()
    exporter.UpdateState(out)
    out.Seek(0, STREAM_SEEK_SET)
    return _benc(out.Read(0xFFFFF))
def sync_gab(server, mapistore, importer, state):
    """Synchronize addressbook (GAB) changes from *mapistore* into *importer*.

    *state* is the base64-encoded state from a previous run; the updated
    state is returned, base64-encoded, for the next invocation.
    """
    in_stream = IStream()
    in_stream.Write(_bdec(state))
    in_stream.Seek(0, STREAM_SEEK_SET)
    tracker = TrackingGABImporter(server, importer)
    exporter = mapistore.OpenProperty(PR_CONTENTS_SYNCHRONIZER,
        IID_IECExportAddressbookChanges, 0, 0)
    exporter.Config(in_stream, 0, tracker)
    step = 0
    while True:
        steps, step = exporter.Synchronize(step)
        if steps == step:
            break
    out_stream = IStream()
    exporter.UpdateState(out_stream)
    out_stream.Seek(0, STREAM_SEEK_SET)
    return _benc(out_stream.Read(0xFFFFF))
def sync_hierarchy(server, syncobj, importer, state, stats=None):
    """Synchronize folder-hierarchy changes from *syncobj* into *importer*.

    *state* is the base64-encoded state from a previous run; the updated
    state is returned, base64-encoded. *stats* is forwarded to the tracking
    importer when given.
    """
    tracker = TrackingHierarchyImporter(server, importer, stats)
    exporter = syncobj.OpenProperty(PR_HIERARCHY_SYNCHRONIZER,
        IID_IExchangeExportChanges, 0, 0)
    stream = IStream()
    stream.Write(_bdec(state))
    stream.Seek(0, STREAM_SEEK_SET)
    exporter.Config(stream, SYNC_NORMAL | SYNC_UNICODE, tracker,
        None, None, None, 0)
    steps, step = None, 0
    while steps != step:
        steps, step = exporter.Synchronize(step)
    # NOTE(review): unlike sync_gab/state, the input stream is reused for the
    # updated state; presumably safe because the new state is not shorter than
    # the old one — TODO confirm
    exporter.UpdateState(stream)
    stream.Seek(0, STREAM_SEEK_SET)
    return _benc(stream.Read(0xFFFFF))
def sync(server, syncobj, importer, state, max_changes, associated=False, window=None, begin=None, end=None, stats=None):
    """Synchronize contents changes from *syncobj* into *importer*.

    *state* is the base64-encoded ICS state from a previous run; the updated
    state is returned, base64-encoded. *max_changes* caps the number of
    Synchronize steps per call; *window* restricts the sync to messages
    delivered in the last N seconds, while *begin*/*end* (datetimes) restrict
    it to a delivery-time range. *associated* includes hidden/FAI messages.
    On transient MAPI errors each step is retried up to 5 times with
    exponential backoff before the change is skipped.
    """
    log = server.log
    importer = TrackingContentsImporter(server, importer, stats)
    exporter = syncobj.OpenProperty(PR_CONTENTS_SYNCHRONIZER, IID_IExchangeExportChanges, 0, 0)
    # load previous sync state into a stream for exporter.Config
    stream = IStream()
    stream.Write(_bdec(state))
    stream.Seek(0, STREAM_SEEK_SET)
    restriction = None
    if window:
        # sync window of last N seconds
        propval = SPropValue(PR_MESSAGE_DELIVERY_TIME, unixtime(int(time.time()) - window))
        restriction = SPropertyRestriction(RELOP_GE, PR_MESSAGE_DELIVERY_TIME, propval)
    elif begin or end:
        # restrict to delivery times in [begin, end)
        restrs = []
        if begin:
            propval = SPropValue(PR_MESSAGE_DELIVERY_TIME, unixtime(time.mktime(begin.timetuple())))
            restrs.append(SPropertyRestriction(RELOP_GE, PR_MESSAGE_DELIVERY_TIME, propval))
        if end:
            propval = SPropValue(PR_MESSAGE_DELIVERY_TIME, unixtime(time.mktime(end.timetuple())))
            restrs.append(SPropertyRestriction(RELOP_LT, PR_MESSAGE_DELIVERY_TIME, propval))
        if len(restrs) == 1:
            restriction = restrs[0]
        else:
            restriction = SAndRestriction(restrs)
    flags = SYNC_NORMAL | SYNC_UNICODE | SYNC_READ_STATE
    if associated:
        flags |= SYNC_ASSOCIATED
    try:
        # test hook: force the "sync state purged" path
        if TESTING and os.getenv('PYKO_TEST_NOT_FOUND'):
            raise MAPIErrorNotFound()
        exporter.Config(stream, flags, importer, restriction, None, None, 0)
    except MAPIErrorNotFound:
        # syncid purged because of 'sync_lifetime' option in server.cfg: get new syncid.
        log.warn("Sync state does not exist on server (anymore); requesting new one")
        # keep the change id but reset the sync id to 0 to request a new one
        syncid, changeid = struct.unpack('<II', _bdec(state))
        stream = IStream()
        stream.Write(struct.pack('<II', 0, changeid))
        stream.Seek(0, STREAM_SEEK_SET)
        exporter.Config(stream, flags, importer, restriction, None, None, 0)
    step = retry = changes = 0
    sleep_time = 0.4  # initial backoff; doubled per error, capped near 5s
    while True:
        try:
            try:
                # test hook: simulate a network error once (skip flag breaks the loop)
                if TESTING and os.getenv('PYKO_TEST_NETWORK_ERROR') and not importer.skip:
                    raise MAPIErrorNetworkError()
                (steps, step) = exporter.Synchronize(step)
            finally:
                # skip applies to a single Synchronize call only; always clear it
                importer.skip = False
            changes += 1
            retry = 0
            if (steps == step) or (max_changes and changes >= max_changes):
                break
        except MAPIError as e:
            log.warn("Received a MAPI error or timeout (error=0x%x, retry=%d/5)", e.hr, retry)
            time.sleep(sleep_time)
            if sleep_time < 5.0:
                sleep_time *= 2.0
            if retry < 5:
                retry += 1
            else:
                log.error("Too many retries, skipping change")
                if stats is not None:
                    stats['errors'] += 1
                importer.skip = True  # in case of a timeout or other issue, try to skip the change after trying several times
                retry = 0
    exporter.UpdateState(stream)
    stream.Seek(0, STREAM_SEEK_SET)
    state = stream.Read(0xFFFFF)
    # because changes may be reordered for efficiency, we are not always
    # linearly following the change journal. so the current state cannot
    # always be represented as a single change id. instead, the current state
    # may contain changes which have been synced, relative to a certain
    # change id (so we have synced until this change id, plus these changes).
    # in pyko though, we always sync until there are no further changes,
    # so this should normally not occur.
    # TODO add an ICS flag to disable reordering!
    if len(state) != 8:
        log.error('sync state %d bytes, expect problems', len(state))
    return _benc(state)
def sync(server, syncobj, importer, state, log, max_changes, associated=False, window=None, begin=None, end=None, stats=None):
    """Synchronize contents changes from *syncobj* into *importer*.

    Older variant that takes an explicit (possibly None) *log* argument.
    *state* is the base64-encoded ICS state from a previous run; the updated
    state is returned, base64-encoded. *max_changes* caps the number of
    Synchronize steps per call; *window* restricts the sync to messages
    delivered in the last N seconds, while *begin*/*end* (datetimes) restrict
    it to a delivery-time range. *associated* includes hidden/FAI messages.
    Transient MAPI errors are retried up to 5 times with exponential backoff
    before the offending change is skipped.
    """
    importer = TrackingContentsImporter(server, importer, log, stats)
    exporter = syncobj.OpenProperty(PR_CONTENTS_SYNCHRONIZER, IID_IExchangeExportChanges, 0, 0)
    # load previous sync state into a stream for exporter.Config
    stream = IStream()
    stream.Write(_bdec(state))
    stream.Seek(0, STREAM_SEEK_SET)
    restriction = None
    if window:
        # sync window of last N seconds
        propval = SPropValue(PR_MESSAGE_DELIVERY_TIME, unixtime(int(time.time()) - window))
        restriction = SPropertyRestriction(RELOP_GE, PR_MESSAGE_DELIVERY_TIME, propval)
    elif begin or end:
        # restrict to delivery times in [begin, end)
        restrs = []
        if begin:
            propval = SPropValue(PR_MESSAGE_DELIVERY_TIME, unixtime(time.mktime(begin.timetuple())))
            restrs.append(SPropertyRestriction(RELOP_GE, PR_MESSAGE_DELIVERY_TIME, propval))
        if end:
            propval = SPropValue(PR_MESSAGE_DELIVERY_TIME, unixtime(time.mktime(end.timetuple())))
            restrs.append(SPropertyRestriction(RELOP_LT, PR_MESSAGE_DELIVERY_TIME, propval))
        if len(restrs) == 1:
            restriction = restrs[0]
        else:
            restriction = SAndRestriction(restrs)
    flags = SYNC_NORMAL | SYNC_UNICODE
    if associated:
        flags |= SYNC_ASSOCIATED
    try:
        exporter.Config(stream, flags, importer, restriction, None, None, 0)
    except MAPIErrorNotFound:
        # syncid purged because of 'sync_lifetime' option in server.cfg: get new syncid.
        if log:
            log.warn("Sync state does not exist on server (anymore); requesting new one")
        # keep the change id but reset the sync id to 0 to request a new one
        syncid, changeid = struct.unpack('<II', _bdec(state))
        stream = IStream()
        stream.Write(struct.pack('<II', 0, changeid))
        stream.Seek(0, STREAM_SEEK_SET)
        exporter.Config(stream, flags, importer, restriction, None, None, 0)
    step = retry = changes = 0
    sleep_time = 0.4  # initial backoff; doubled per error, capped near 5s
    while True:
        try:
            try:
                (steps, step) = exporter.Synchronize(step)
            finally:
                # skip applies to a single Synchronize call only; always clear it
                importer.skip = False
            changes += 1
            retry = 0
            if (steps == step) or (max_changes and changes >= max_changes):
                break
        except MAPIError as e:
            if log:
                log.warn("Received a MAPI error or timeout (error=0x%x, retry=%d/5)", e.hr, retry)
            time.sleep(sleep_time)
            if sleep_time < 5.0:
                sleep_time *= 2.0
            if retry < 5:
                retry += 1
            else:
                if log:
                    log.error("Too many retries, skipping change")
                if stats:
                    stats['errors'] += 1
                importer.skip = True  # in case of a timeout or other issue, try to skip the change after trying several times
                retry = 0
    exporter.UpdateState(stream)
    stream.Seek(0, STREAM_SEEK_SET)
    return _benc(stream.Read(0xFFFFF))