def execute_arc_pull(ui_, params, stored_cfg):
    """ Update from an existing incremental archive in Freenet. """
    update_sm = None
    top_key_state = None
    try:
        assert 'ARCHIVE_CACHE_DIR' in params
        assert not params['REQUEST_URI'] is None
        if not params['NO_SEARCH'] and is_usk_file(params['REQUEST_URI']):
            index = stored_cfg.get_index(params['REQUEST_URI'])
            if not index is None:
                if index >= get_version(params['REQUEST_URI']):
                    # Update index to the latest known value
                    # for the --uri case.
                    params['REQUEST_URI'] = get_usk_for_usk_version(
                        params['REQUEST_URI'], index)
                else:
                    ui_.status(("Cached index [%i] < index in USK [%i]. "
                                + "Using the index from the USK.\n"
                                + "You're sure that index exists, right?\n") %
                               (index, get_version(params['REQUEST_URI'])))

        update_sm = setup(ui_, None, params, stored_cfg)
        ui_.status("%sRequest URI:\n%s\n" %
                   (is_redundant(params['REQUEST_URI']),
                    params['REQUEST_URI']))

        # Pull changes into the local block cache.
        ctx = ArchiveUpdateContext(update_sm, ui_)
        ctx.update({'REQUEST_URI': params['REQUEST_URI'],
                    'ARCHIVE_CACHE_DIR': params['ARCHIVE_CACHE_DIR']})
        start_requesting_blocks(update_sm, ctx)
        run_until_quiescent(update_sm, params['POLL_SECS'])

        if update_sm.get_state(QUIESCENT).arrived_from(((FINISHING,))):
            uri = update_sm.get_state(ARC_REQUESTING_URI).get_latest_uri()
            blocks = update_sm.get_state(ARC_CACHING_TOPKEY).get_blocks()
            plural = ''
            if len(blocks) != 1:
                plural = 's'
            ui_.status("Fetched %i bytes in %i CHK%s from:\n%s\n" %
                       (sum([block[0] for block in blocks]),
                        len(blocks), plural, uri))
            ui_.status("Updating local directory...\n")
            local_synch(ui_,
                        params['ARCHIVE_CACHE_DIR'],
                        # Use the updated URI below so we get the
                        # right cached topkey.
                        uri,
                        params['TO_DIR'])
            top_key_state = ARC_REQUESTING_URI
        else:
            ui_.status("Synchronize failed.\n")

        arc_handle_updating_config(update_sm, params, stored_cfg, True)
    finally:
        arc_cleanup(update_sm, top_key_state)

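# A minimal sketch, not part of the original module: a params dict containing
# just the keys execute_arc_pull() reads above. The helper name, argument
# names, and the POLL_SECS value are illustrative assumptions, not the real
# command-line layer's API.
def _example_arc_pull_params(cache_dir, request_uri, to_dir):
    """ Build a params dict with only the keys execute_arc_pull() uses. """
    return {'ARCHIVE_CACHE_DIR': cache_dir,  # local block cache directory
            'REQUEST_URI': request_uri,      # archive USK (or other key)
            'TO_DIR': to_dir,                # directory local_synch() updates
            'NO_SEARCH': False,              # False -> honor the cached index
            'POLL_SECS': 0.25}               # assumed FCP polling interval
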
def execute_arc_push(ui_, params, stored_cfg):
    """ Push an update into an incremental archive in Freenet. """
    assert params.get('REQUEST_URI', None) is None # REDFLAG: why?
    update_sm = None
    top_key_state = None
    try:
        update_sm = setup(ui_, None, params, stored_cfg)
        request_uri, dummy_is_keypair = do_key_setup(ui_, update_sm, params,
                                                     stored_cfg)
        create_dirs(ui_, params['ARCHIVE_CACHE_DIR'], request_uri)
        ui_.status("%sInsert URI:\n%s\n" %
                   (is_redundant(params['INSERT_URI']),
                    params['INSERT_URI']))

        # Update the local archive.
        files, top_key = local_update(params['ARCHIVE_CACHE_DIR'],
                                      request_uri,
                                      params['FROM_DIR'])

        if files is None:
            raise util.Abort("There are no local changes to add.")

        for block in top_key[0]:
            if block[1][0] == 'CHK@':
                ui_.status("Created new %i byte block.\n" % block[0])

        # Insert them into Freenet.
        ctx = ArchiveUpdateContext(update_sm, ui_)
        ctx.update({'REQUEST_URI': request_uri,
                    'INSERT_URI': params['INSERT_URI'],
                    'ARCHIVE_CACHE_DIR': params['ARCHIVE_CACHE_DIR'],
                    'PROVISIONAL_TOP_KEY': top_key,
                    'ARCHIVE_BLOCK_FILES': files})

        start_inserting_blocks(update_sm, ctx)
        run_until_quiescent(update_sm, params['POLL_SECS'])

        if update_sm.get_state(QUIESCENT).arrived_from(((FINISHING,))):
            ui_.status("Inserted to:\n%s\n" %
                       '\n'.join(update_sm.get_state(ARC_INSERTING_URI).
                                 get_request_uris()))
            top_key_state = ARC_INSERTING_URI
        else:
            ui_.status("Push to archive failed.\n")

        arc_handle_updating_config(update_sm, params, stored_cfg)
    finally:
        arc_cleanup(update_sm, top_key_state)

def execute_arc_reinsert(ui_, params, stored_cfg):
    """ Reinsert the archive into Freenet. """
    assert not params.get('REQUEST_URI', None) is None
    assert params.get('REINSERT_LEVEL', 0) > 0
    update_sm = None
    try:
        update_sm = setup(ui_, None, params, stored_cfg)
        request_uri, dummy_is_keypair = do_key_setup(ui_, update_sm, params,
                                                     stored_cfg)
        create_dirs(ui_, params['ARCHIVE_CACHE_DIR'], request_uri)
        ui_.status("%sRequest URI:\n%s\n" % (is_redundant(request_uri),
                                             request_uri))

        # Get the blocks to re-insert.
        files, top_key = local_reinsert(params['ARCHIVE_CACHE_DIR'],
                                        request_uri)

        # Tell the user about them.
        for block in top_key[0]:
            if block[1][0] == 'CHK@':
                ui_.status("Re-inserting %i byte block.\n" % block[0])

        # Start re-inserting them.
        ctx = ArchiveUpdateContext(update_sm, ui_)
        ctx.update({'REQUEST_URI': request_uri,
                    'INSERT_URI': params['INSERT_URI'],
                    'ARCHIVE_CACHE_DIR': params['ARCHIVE_CACHE_DIR'],
                    'PROVISIONAL_TOP_KEY': top_key,
                    'ARCHIVE_BLOCK_FILES': files,
                    'REINSERT': params['REINSERT_LEVEL']})

        start_inserting_blocks(update_sm, ctx)
        run_until_quiescent(update_sm, params['POLL_SECS'])

        if update_sm.get_state(QUIESCENT).arrived_from(((FINISHING,))):
            ui_.status("Re-insert finished.\n")
        else:
            ui_.status("Re-insert failed.\n")

        arc_handle_updating_config(update_sm, params, stored_cfg)
    finally:
        arc_cleanup(update_sm, None) # Don't prune cache.

def execute_arc_create(ui_, params, stored_cfg):
    """ Create a new incremental archive. """
    update_sm = None
    top_key_state = None
    try:
        assert 'ARCHIVE_CACHE_DIR' in params
        assert 'FROM_DIR' in params
        update_sm = setup(ui_, None, params, stored_cfg)
        request_uri, dummy = do_key_setup(ui_, update_sm, params, stored_cfg)
        create_dirs(ui_, params['ARCHIVE_CACHE_DIR'], request_uri)
        ui_.status("%sInsert URI:\n%s\n" %
                   (is_redundant(params['INSERT_URI']),
                    params['INSERT_URI']))

        # Create the local blocks.
        files, top_key = local_create(params['ARCHIVE_CACHE_DIR'],
                                      request_uri,
                                      params['FROM_DIR'])

        for block in top_key[0]:
            if block[1][0] == 'CHK@':
                ui_.status("Created new %i byte block.\n" % block[0])

        # Insert them into Freenet.
        ctx = ArchiveUpdateContext(update_sm, ui_)
        ctx.update({'REQUEST_URI': request_uri,
                    'INSERT_URI': params['INSERT_URI'],
                    'ARCHIVE_CACHE_DIR': params['ARCHIVE_CACHE_DIR'],
                    'PROVISIONAL_TOP_KEY': top_key,
                    'ARCHIVE_BLOCK_FILES': files})

        start_inserting_blocks(update_sm, ctx)
        run_until_quiescent(update_sm, params['POLL_SECS'])

        if update_sm.get_state(QUIESCENT).arrived_from(((FINISHING,))):
            ui_.status("Inserted to:\n%s\n" %
                       '\n'.join(update_sm.get_state(ARC_INSERTING_URI).
                                 get_request_uris()))
            top_key_state = ARC_INSERTING_URI
        else:
            ui_.status("Archive create failed.\n")

        arc_handle_updating_config(update_sm, params, stored_cfg)
    finally:
        arc_cleanup(update_sm, top_key_state)

def test_requesting_redundant(self):
    if not 'FILE_BLOCKS' in SHARED_STATE:
        print "You must run test_inserting() before this test."
        self.assertTrue(False)

    ctx, update_sm, start_state = self.setup_request_sm()

    blocks = []
    for entry in SHARED_STATE['FILE_BLOCKS']:
        blocks.append((entry[1], tuple(break_primary(entry[2]))))

    self.verify_not_cached(ctx, blocks)
    start_state.blocks = tuple(blocks)

    start(update_sm, ctx)
    run_until_quiescent(update_sm, POLL_SECS)
    self.assertTrue(update_sm.get_state(QUIESCENT).
                    arrived_from(((FINISHING,))))

    self.verify_cached(ctx, blocks)

def test_inserting(self):
    # Takes longer to insert existing blocks?
    offset = random.randrange(0, 256)
    print "offset: ", offset
    lengths = (FREENET_BLOCK_LEN - 1,
               FREENET_BLOCK_LEN,
               FREENET_BLOCK_LEN + 1,
               1,
               FREENET_BLOCK_LEN + 11235, )

    insert_files = []
    for index, length in enumerate(lengths):
        full_path = os.path.join(self.tmp_dir, "%i.bin" % index)
        out_file = open(full_path, 'wb')
        out_file.write(bytes(length, offset))
        out_file.close()
        self.assertTrue(os.path.getsize(full_path) == length)
        insert_files.append(full_path)

    update_sm = self.make_state_machine()
    self.assertTrue(not 'TEST_STATE' in update_sm.states)
    update_sm.states['TEST_STATE'] = (
        InsertingRedundantBlocks(update_sm,
                                 'TEST_STATE',
                                 FINISHING,
                                 FAILING))

    ctx = ArchiveUpdateContext(update_sm, FakeUI())
    ctx.update({'ARCHIVE_CACHE_DIR': self.tmp_dir,
                'REQUEST_URI': SOME_USK,
                'ARCHIVE_BLOCK_FILES': insert_files,
                'START_STATE': 'TEST_STATE'})

    create_dirs(ctx.ui_, ctx['ARCHIVE_CACHE_DIR'], ctx['REQUEST_URI'])

    start(update_sm, ctx)
    run_until_quiescent(update_sm, POLL_SECS)
    self.assertTrue(update_sm.get_state(QUIESCENT).
                    arrived_from(((FINISHING,))))

    blocks = update_sm.states['TEST_STATE'].files
    for index, entry in enumerate(blocks):
        print "block [%i]: len: %i" % (index, entry[1])
        for chk in entry[2]:
            print "   ", chk

    # FREENET_BLOCK_LEN - 1, first is unpadded
    self.checkCHK(blocks[0][2][0], blocks[0][1], blocks[0][1],
                  bytes(blocks[0][1], offset))
    # FREENET_BLOCK_LEN - 1, second is padded
    self.checkCHK(blocks[0][2][1], blocks[0][1], blocks[0][1] + 1,
                  bytes(blocks[0][1], offset))

    # FREENET_BLOCK_LEN, first is padded
    self.checkCHK(blocks[1][2][0], blocks[1][1], blocks[1][1] + 1,
                  bytes(blocks[1][1], offset))
    # FREENET_BLOCK_LEN, second is padded
    self.checkCHK(blocks[1][2][1], blocks[1][1], blocks[1][1] + 1,
                  bytes(blocks[1][1], offset))

    # FREENET_BLOCK_LEN + 1, first is unpadded
    self.checkCHK(blocks[2][2][0], blocks[2][1], blocks[2][1],
                  bytes(blocks[2][1], offset))
    # FREENET_BLOCK_LEN + 1, second is unpadded
    self.checkCHK(blocks[2][2][1], blocks[2][1], blocks[2][1],
                  bytes(blocks[2][1], offset))

    # 1, first is unpadded
    self.checkCHK(blocks[3][2][0], blocks[3][1], blocks[3][1],
                  bytes(blocks[3][1], offset))
    # 1, second is padded
    self.checkCHK(blocks[3][2][1], blocks[3][1], blocks[3][1] + 1,
                  bytes(blocks[3][1], offset))

    # FREENET_BLOCK_LEN + 11235, first is unpadded
    self.checkCHK(blocks[4][2][0], blocks[4][1], blocks[4][1],
                  bytes(blocks[4][1], offset))
    # FREENET_BLOCK_LEN + 11235, second is unpadded
    self.checkCHK(blocks[4][2][1], blocks[4][1], blocks[4][1],
                  bytes(blocks[4][1], offset))

    # Save info for use in request testing.
    SHARED_STATE['FILE_BLOCKS'] = blocks
    SHARED_STATE['OFFSET'] = offset

def execute_wiki_apply(ui_, repo, params, stored_cfg):
    """ Fetch a wiki change submission CHK and apply it to a local
        directory. """
    update_sm = None
    try:
        assert 'REQUEST_URI' in params
        # Get version, i.e. just the hg parent == hg head
        version = get_hg_version(repo)

        # Get target directory.
        params['ISWIKI'] = True
        read_freesite_cfg(ui_, repo, params, stored_cfg)

        update_sm = setup(ui_, repo, params, stored_cfg)

        # Make an FCP download request which will run on the
        # state machine.
        request = StatefulRequest(update_sm)
        request.tag = 'submission_zip_request'
        request.in_params.definition = GET_DEF # To RAM.
        request.in_params.fcp_params = update_sm.params.copy()
        request.in_params.fcp_params['URI'] = params['REQUEST_URI']
        # Knee high barrier against abuse.
        request.in_params.fcp_params['MaxSize'] = FREENET_BLOCK_LEN

        ui_.status("Requesting wiki submission from...\n%s\n" %
                   params['REQUEST_URI'])
        update_sm.start_single_request(request)
        run_until_quiescent(update_sm, params['POLL_SECS'])

        if update_sm.get_state(QUIESCENT).arrived_from(((FINISHING,))):
            raw_bytes = update_sm.get_state(RUNNING_SINGLE_REQUEST).\
                        final_msg[2]
            assert request.response[0] == 'AllData'
            ui_.status("Fetched %i byte submission.\n" % len(raw_bytes))
            base_ver, submitter = get_info(StringIO.StringIO(raw_bytes))
            ui_.status("Base version: %s, Submitter: %s (unverifiable!)\n"
                       % (base_ver[:12], submitter))

            #print "H_ACKING base_ver to test exception!"
            #base_ver = 'da2f653c5c47b7ee7a814e668aa1d63c50c3a4f3'
            if not has_version(repo, base_ver):
                ui_.warn("That version isn't in the local repo.\n" +
                         "Try running hg fn-pull --aggressive.\n")
                raise util.Abort("%s not in local repo" % base_ver[:12])

            if base_ver != version:
                ui_.warn("Version mismatch! You might have to "
                         + "manually merge.\n")

            # Set up an IFileFunctions that reads the correct versions of
            # the unpatched files out of Mercurial.
            overlay = HgFileOverlay(ui_, repo,
                                    # i.e. "<>/wiki_root" NOT
                                    # "<>/wiki_root/wikitext"
                                    os.path.join(repo.root,
                                                 params['WIKI_ROOT']),
                                    # cleanup() in finally deletes this.
                                    make_temp_file(update_sm.ctx.
                                                   bundle_cache.base_dir))
            overlay.version = base_ver
            validate_wikitext(overlay)
            updates = unbundle_wikitext(overlay,
                                        StringIO.StringIO(raw_bytes))
            for index, label in enumerate(('CREATED', 'MODIFIED', 'REMOVED',
                                           'ALREADY PATCHED')):
                if len(updates[index]) > 0:
                    values = list(updates[index])
                    values.sort()
                    ui_.status('%s:\n%s\n' % (label, '\n'.join(values)))
    finally:
        cleanup(update_sm)

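# A small illustrative helper, not in the original module: the updates value
# returned by unbundle_wikitext() is indexed in the same order as the labels
# printed by execute_wiki_apply() above, so it can be mapped to a dict. The
# helper name is hypothetical.
def _label_wikitext_updates(updates):
    """ Map unbundle_wikitext() results onto their display labels. """
    labels = ('CREATED', 'MODIFIED', 'REMOVED', 'ALREADY PATCHED')
    return dict((label, sorted(updates[index]))
                for index, label in enumerate(labels))
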
def execute_wiki_submit(ui_, repo, params, stored_cfg):
    """ Insert an overlayed wiki change submission CHK into freenet
        and return a notification message string. """
    update_sm = None
    try:
        # Read submitter out of stored_cfg.
        submitter = stored_cfg.defaults.get('FMS_ID', None)
        assert not submitter is None
        assert submitter.find('@') == -1

        # Get version, i.e. just the hg parent == hg head
        version = get_hg_version(repo)

        params['ISWIKI'] = True
        read_freesite_cfg(ui_, repo, params, stored_cfg)

        if not params.get('OVERLAYED', False):
            raise util.Abort("Can't submit from non-overlayed wiki edits!")
        if not params.get('CLIENT_WIKI_GROUP', None):
            # DCI: test code path
            raise util.Abort("No wiki_group in fnwiki.cfg. Don't "
                             + "know where to post to!")

        ui_.status("\nPreparing to submit to %s FMS group as %s.\n" %
                   (params['CLIENT_WIKI_GROUP'], submitter))

        # Create submission zip file in RAM.
        overlay = get_file_funcs(os.path.join(repo.root,
                                              params['WIKI_ROOT']),
                                 True)
        try:
            raw_bytes = bundle_wikitext(overlay, version, submitter)
        except NoChangesError:
            raise util.Abort("There are no overlayed changes to submit.")

        # Punt if it's too big.
        if len(raw_bytes) >= FREENET_BLOCK_LEN:
            raise util.Abort("Too many changes. Change .zip must be <32K")

        update_sm = setup(ui_, repo, params, stored_cfg)

        # Make an FCP file insert request which will run on the
        # state machine.
        request = StatefulRequest(update_sm)
        request.tag = 'submission_zip_insert'
        request.in_params.definition = PUT_FILE_DEF
        request.in_params.fcp_params = update_sm.params.copy()
        request.in_params.fcp_params['URI'] = 'CHK@'
        request.in_params.send_data = raw_bytes

        ui_.status("Inserting %i byte submission CHK...\n" % len(raw_bytes))
        update_sm.start_single_request(request)
        run_until_quiescent(update_sm, params['POLL_SECS'])

        heads = [hexlify(head) for head in repo.heads()]
        heads.sort()
        if update_sm.get_state(QUIESCENT).arrived_from(((FINISHING,))):
            chk = update_sm.get_state(RUNNING_SINGLE_REQUEST).\
                  final_msg[1]['URI']
            ui_.status("Patch CHK:\n%s\n" % chk)
            # ':', '|' not in freenet base64
            # DCI: why normalize???
            # (usk_hash, base_version, chk, length)
            ret = ':'.join(('W',
                            normalize(params['REQUEST_URI']),
                            version[:12],
                            chk,
                            str(len(raw_bytes))))

            ui_.status("\nNotification:\n%s\n" % ret + '\n')
            return ret, params['CLIENT_WIKI_GROUP']

        raise util.Abort("Submission CHK insert failed.")
    finally:
        # Cleans up out file.
        cleanup(update_sm)

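# Illustrative note, not part of the original module: the notification string
# assembled above is colon-joined, so with hypothetical field values it looks
# roughly like:
#
#   W:<normalize(REQUEST_URI)>:<first 12 chars of base version>:<CHK@...>:<zip length>
#
# Per the inline comment above, ':' does not occur in freenet base64, which is
# what makes it usable as the field separator.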