def setup_request_sm(self):
        """ Helper sets up a state machine instance containing a
            RequestingRedundantBlocks instance. """
        update_sm = self.make_state_machine()
        self.assertTrue('TEST_HAS_BLOCKS' not in update_sm.states)

        update_sm.states['TEST_HAS_BLOCKS'] = HoldingBlocks(
            update_sm, 'TEST_HAS_BLOCKS', 'TEST_REQUESTING')

        update_sm.states['TEST_REQUESTING'] = RequestingRedundantBlocks(
            update_sm, 'TEST_REQUESTING', FINISHING, FAILING)

        ctx = ArchiveUpdateContext(update_sm, FakeUI())
        ctx.update({'ARCHIVE_CACHE_DIR': self.tmp_dir,
                    'REQUEST_URI': SOME_USK,
                    'START_STATE': 'TEST_HAS_BLOCKS'})

        create_dirs(ctx.ui_,
                    ctx['ARCHIVE_CACHE_DIR'],
                    ctx['REQUEST_URI'])

        return (ctx, update_sm, update_sm.states['TEST_HAS_BLOCKS'])
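
A typical test drives the returned machine to completion. A minimal sketch, assuming the start() and run_until_quiescent() helpers and the POLL_SECS constant used by the other tests in this file; the method name and body are hypothetical:

def test_requesting(self):
    # Hypothetical usage of the helper above.
    ctx, update_sm, dummy_hold_state = self.setup_request_sm()
    start(update_sm, ctx)
    run_until_quiescent(update_sm, POLL_SECS)
    self.assertTrue(update_sm.get_state(QUIESCENT).
                    arrived_from((FINISHING,)))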
Example #2
def execute_arc_pull(ui_, params, stored_cfg):
    """ Update from an existing incremental archive in Freenet. """
    update_sm = None
    top_key_state = None
    try:
        assert 'ARCHIVE_CACHE_DIR' in params
        assert params['REQUEST_URI'] is not None
        if not params['NO_SEARCH'] and is_usk_file(params['REQUEST_URI']):
            index = stored_cfg.get_index(params['REQUEST_URI'])
            if index is not None:
                if index >= get_version(params['REQUEST_URI']):
                    # Update index to the latest known value
                    # for the --uri case.
                    params['REQUEST_URI'] = get_usk_for_usk_version(
                        params['REQUEST_URI'], index)
                else:
                    ui_.status(("Cached index [%i] < index in USK [%i].  "
                                "Using the index from the USK.\n"
                                "You're sure that index exists, right?\n") %
                               (index, get_version(params['REQUEST_URI'])))

        update_sm = setup(ui_, None, params, stored_cfg)
        ui_.status(
            "%sRequest URI:\n%s\n" %
            (is_redundant(params['REQUEST_URI']), params['REQUEST_URI']))

        # Pull changes into the local block cache.
        ctx = ArchiveUpdateContext(update_sm, ui_)
        ctx.update({
            'REQUEST_URI': params['REQUEST_URI'],
            'ARCHIVE_CACHE_DIR': params['ARCHIVE_CACHE_DIR']
        })
        start_requesting_blocks(update_sm, ctx)
        run_until_quiescent(update_sm, params['POLL_SECS'])

        if update_sm.get_state(QUIESCENT).arrived_from((FINISHING,)):
            uri = update_sm.get_state(ARC_REQUESTING_URI).get_latest_uri()
            blocks = update_sm.get_state(ARC_CACHING_TOPKEY).get_blocks()
            plural = ''
            if len(blocks) != 1:
                plural = 's'
            ui_.status("Fetched %i bytes in %i CHK%s from:\n%s\n" %
                       (sum([block[0]
                             for block in blocks]), len(blocks), plural, uri))
            ui_.status("Updating local directory...\n")
            local_synch(
                ui_,
                params['ARCHIVE_CACHE_DIR'],
                # Use the updated URI below so we get the
                # right cached topkey.
                uri,
                params['TO_DIR'])
            top_key_state = ARC_REQUESTING_URI
        else:
            ui_.status("Synchronize failed.\n")

        arc_handle_updating_config(update_sm, params, stored_cfg, True)
    finally:
        arc_cleanup(update_sm, top_key_state)
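
The params mapping is assembled by the caller. A minimal sketch of a pull invocation, where cache_dir, request_uri, target_dir, ui_ and stored_cfg are placeholders supplied by the surrounding application; only the keys shown are read by the function, and the POLL_SECS value is illustrative:

# Hypothetical caller for execute_arc_pull().
params = {'ARCHIVE_CACHE_DIR': cache_dir,  # local block cache directory
          'REQUEST_URI': request_uri,      # USK of the archive to pull
          'NO_SEARCH': False,              # allow cached-index lookup
          'POLL_SECS': 0.25,               # state machine polling interval
          'TO_DIR': target_dir}            # directory to synchronize into
execute_arc_pull(ui_, params, stored_cfg)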
Example #3
def execute_arc_push(ui_, params, stored_cfg):
    """ Push an update into an incremental archive in Freenet. """
    assert params.get('REQUEST_URI', None) is None  # REDFLAG: why?
    update_sm = None
    top_key_state = None
    try:
        update_sm = setup(ui_, None, params, stored_cfg)
        request_uri, dummy_is_keypair = do_key_setup(ui_, update_sm, params,
                                                     stored_cfg)
        create_dirs(ui_, params['ARCHIVE_CACHE_DIR'], request_uri)
        ui_.debug("%sInsert URI:\n%s\n" % (is_redundant(params['INSERT_URI']),
                                            params['INSERT_URI']))


        # Update the local archive.
        files, top_key = local_update(params['ARCHIVE_CACHE_DIR'],
                                      request_uri,
                                      params['FROM_DIR'])

        if files is None:
            raise util.Abort("There are no local changes to add.")

        for block in top_key[0]:
            if block[1][0] == 'CHK@':
                ui_.status("Created new %i byte block.\n" % block[0])

        # Insert them into Freenet.
        ctx = ArchiveUpdateContext(update_sm, ui_)
        ctx.update({'REQUEST_URI': request_uri,
                    'INSERT_URI': params['INSERT_URI'],
                    'ARCHIVE_CACHE_DIR': params['ARCHIVE_CACHE_DIR'],
                    'PROVISIONAL_TOP_KEY': top_key,
                    'ARCHIVE_BLOCK_FILES': files})

        start_inserting_blocks(update_sm, ctx)
        run_until_quiescent(update_sm, params['POLL_SECS'])

        if update_sm.get_state(QUIESCENT).arrived_from((FINISHING,)):
            ui_.status("Inserted to:\n%s\n" % '\n'.join(
                update_sm.get_state(ARC_INSERTING_URI).get_request_uris()))
            top_key_state = ARC_INSERTING_URI
        else:
            ui_.status("Push to archive failed.\n")

        arc_handle_updating_config(update_sm, params, stored_cfg)
    finally:
        arc_cleanup(update_sm, top_key_state)
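
For the push direction, REQUEST_URI must be unset; the function derives it from the insert key via do_key_setup(). A hedged sketch of a caller, with cache_dir, source_dir and insert_uri as placeholders:

# Hypothetical caller for execute_arc_push().
params = {'ARCHIVE_CACHE_DIR': cache_dir,
          'FROM_DIR': source_dir,    # directory holding local changes
          'INSERT_URI': insert_uri,  # private-key side of the archive USK
          'POLL_SECS': 0.25}
execute_arc_push(ui_, params, stored_cfg)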
Example #4
def execute_arc_reinsert(ui_, params, stored_cfg):
    """ Reinsert the archive into Freenet. """
    assert params.get('REQUEST_URI', None) is not None
    assert params.get('REINSERT_LEVEL', 0) > 0

    update_sm = None
    try:
        update_sm = setup(ui_, None, params, stored_cfg)
        request_uri, dummy_is_keypair = do_key_setup(ui_, update_sm, params,
                                                     stored_cfg)
        create_dirs(ui_, params['ARCHIVE_CACHE_DIR'], request_uri)

        ui_.status("%sRequest URI:\n%s\n" %
                   (is_redundant(request_uri), request_uri))

        # Get the blocks to re-insert.
        files, top_key = local_reinsert(params['ARCHIVE_CACHE_DIR'],
                                        request_uri)

        # Tell the user about them.
        for block in top_key[0]:
            if block[1][0] == 'CHK@':
                ui_.status("Re-inserting %i byte block.\n" % block[0])

        # Start re-inserting them.
        ctx = ArchiveUpdateContext(update_sm, ui_)
        ctx.update({
            'REQUEST_URI': request_uri,
            'INSERT_URI': params['INSERT_URI'],
            'ARCHIVE_CACHE_DIR': params['ARCHIVE_CACHE_DIR'],
            'PROVISIONAL_TOP_KEY': top_key,
            'ARCHIVE_BLOCK_FILES': files,
            'REINSERT': params['REINSERT_LEVEL']
        })

        start_inserting_blocks(update_sm, ctx)
        run_until_quiescent(update_sm, params['POLL_SECS'])

        if update_sm.get_state(QUIESCENT).arrived_from((FINISHING,)):
            ui_.status("Re-insert finished.\n")
        else:
            ui_.status("Re-insert failed.\n")

        arc_handle_updating_config(update_sm, params, stored_cfg)
    finally:
        arc_cleanup(update_sm, None)  # Don't prune cache.
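
Re-insertion needs both URIs up front and a positive REINSERT_LEVEL, per the asserts above; a minimal sketch with placeholder values:

# Hypothetical caller for execute_arc_reinsert().
params = {'ARCHIVE_CACHE_DIR': cache_dir,
          'REQUEST_URI': request_uri,  # required, unlike the push case
          'INSERT_URI': insert_uri,
          'REINSERT_LEVEL': 1,         # must be > 0
          'POLL_SECS': 0.25}
execute_arc_reinsert(ui_, params, stored_cfg)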
Example #5
def execute_arc_create(ui_, params, stored_cfg):
    """ Create a new incremental archive. """
    update_sm = None
    top_key_state = None
    try:
        assert 'ARCHIVE_CACHE_DIR' in params
        assert 'FROM_DIR' in params
        update_sm = setup(ui_, None, params, stored_cfg)
        request_uri, dummy = do_key_setup(ui_, update_sm, params, stored_cfg)
        create_dirs(ui_, params['ARCHIVE_CACHE_DIR'], request_uri)
        ui_.status("%sInsert URI:\n%s\n" %
                   (is_redundant(params['INSERT_URI']), params['INSERT_URI']))

        # Create the local blocks.
        files, top_key = local_create(params['ARCHIVE_CACHE_DIR'], request_uri,
                                      params['FROM_DIR'])

        for block in top_key[0]:
            if block[1][0] == 'CHK@':
                ui_.status("Created new %i byte block.\n" % block[0])

        # Insert them into Freenet.
        ctx = ArchiveUpdateContext(update_sm, ui_)
        ctx.update({
            'REQUEST_URI': request_uri,
            'INSERT_URI': params['INSERT_URI'],
            'ARCHIVE_CACHE_DIR': params['ARCHIVE_CACHE_DIR'],
            'PROVISIONAL_TOP_KEY': top_key,
            'ARCHIVE_BLOCK_FILES': files
        })

        start_inserting_blocks(update_sm, ctx)
        run_until_quiescent(update_sm, params['POLL_SECS'])

        if update_sm.get_state(QUIESCENT).arrived_from((FINISHING,)):
            ui_.status("Inserted to:\n%s\n" % '\n'.join(
                update_sm.get_state(ARC_INSERTING_URI).get_request_uris()))
            top_key_state = ARC_INSERTING_URI
        else:
            ui_.status("Archive create failed.\n")

        arc_handle_updating_config(update_sm, params, stored_cfg)
    finally:
        arc_cleanup(update_sm, top_key_state)
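
Creating a fresh archive looks like the push case minus a pre-existing REQUEST_URI; again a sketch with placeholder values:

# Hypothetical caller for execute_arc_create().
params = {'ARCHIVE_CACHE_DIR': cache_dir,
          'FROM_DIR': source_dir,  # initial contents of the archive
          'INSERT_URI': insert_uri,
          'POLL_SECS': 0.25}
execute_arc_create(ui_, params, stored_cfg)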
Example #6
    def make_state_machine(self):
        if self.connection is not None:
            self.connection.close()

        callbacks = UICallbacks(FakeUI())
        callbacks.verbosity = 5
        # Knows about reading and writing bytes.
        async_socket = PolledSocket(FCP_HOST, FCP_PORT)
        # Knows about running the FCP protocol over async_socket.
        self.connection = FCPConnection(async_socket, True,
                                        callbacks.connection_state)
        # Knows about running requests from a request queue.
        runner = RequestRunner(self.connection, N_CONCURRENT)
        # Knows how to run series of requests to perform operations
        # on an archive in Freenet.
        sm = ArchiveStateMachine(runner, ArchiveUpdateContext())
        sm.transition_callback = callbacks.transition_callback
        sm.monitor_callback = callbacks.monitor_callback
        sm.params['CANCEL_TIME_SECS'] = CANCEL_TIME_SECS

        return sm
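
make_state_machine() leaves self.connection open for reuse between tests. A plausible companion tearDown, mirroring the close() call at the top of the method; this is a sketch, not code from the suite:

    def tearDown(self):
        # Hypothetical cleanup matching make_state_machine().
        if self.connection is not None:
            self.connection.close()
            self.connection = None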
Example #7
    def test_inserting(self):
        # Takes longer to insert existing blocks?
        offset = random.randrange(0, 256)
        print "offset: ", offset
        lengths = (FREENET_BLOCK_LEN - 1,
                   FREENET_BLOCK_LEN,
                   FREENET_BLOCK_LEN + 1,
                   1,
                   FREENET_BLOCK_LEN + 11235,
                   )

        insert_files = []
        for index, length in enumerate(lengths):
            full_path = os.path.join(self.tmp_dir,
                                     "%i.bin" % index)
            out_file = open(full_path, 'wb')
            out_file.write(bytes(length, offset))
            out_file.close()
            self.assertTrue(os.path.getsize(full_path) == length)
            insert_files.append(full_path)

        update_sm = self.make_state_machine()
        self.assertTrue('TEST_STATE' not in update_sm.states)
        update_sm.states['TEST_STATE'] = InsertingRedundantBlocks(
            update_sm, 'TEST_STATE', FINISHING, FAILING)

        ctx = ArchiveUpdateContext(update_sm, FakeUI())
        ctx.update({'ARCHIVE_CACHE_DIR': self.tmp_dir,
                    'REQUEST_URI': SOME_USK,
                    'ARCHIVE_BLOCK_FILES': insert_files,
                    'START_STATE': 'TEST_STATE'})

        create_dirs(ctx.ui_,
                    ctx['ARCHIVE_CACHE_DIR'],
                    ctx['REQUEST_URI'])

        start(update_sm, ctx)
        run_until_quiescent(update_sm, POLL_SECS)
        self.assertTrue(update_sm.get_state(QUIESCENT).
                        arrived_from((FINISHING,)))

        blocks = update_sm.states['TEST_STATE'].files
        for index, entry in enumerate(blocks):
            print "block [%i]: len: %i" % (index, entry[1])
            for chk in entry[2]:
                print "   ", chk

        # FREENET_BLOCK_LEN - 1, first is unpadded
        self.checkCHK(blocks[0][2][0], blocks[0][1], blocks[0][1],
                      bytes(blocks[0][1], offset))
        # FREENET_BLOCK_LEN - 1, second is padded
        self.checkCHK(blocks[0][2][1], blocks[0][1], blocks[0][1] + 1,
                      bytes(blocks[0][1], offset))

        # FREENET_BLOCK_LEN first is padded
        self.checkCHK(blocks[1][2][0], blocks[1][1], blocks[1][1] + 1,
                      bytes(blocks[1][1], offset))
        # FREENET_BLOCK_LEN second is padded
        self.checkCHK(blocks[1][2][1], blocks[1][1], blocks[1][1] + 1,
                      bytes(blocks[1][1], offset))

        # FREENET_BLOCK_LEN + 1, first is unpadded
        self.checkCHK(blocks[2][2][0], blocks[2][1], blocks[2][1],
                      bytes(blocks[2][1], offset))
        # FREENET_BLOCK_LEN + 1, second is unpadded
        self.checkCHK(blocks[2][2][1], blocks[2][1], blocks[2][1],
                      bytes(blocks[2][1], offset))

        # 1, first is unpadded
        self.checkCHK(blocks[3][2][0], blocks[3][1], blocks[3][1],
                      bytes(blocks[3][1], offset))

        # 1, second is padded
        self.checkCHK(blocks[3][2][1], blocks[3][1], blocks[3][1] + 1,
                      bytes(blocks[3][1], offset))


        # FREENET_BLOCK_LEN + 11235, first is unpadded
        self.checkCHK(blocks[4][2][0], blocks[4][1], blocks[4][1],
                      bytes(blocks[4][1], offset))

        # FREENET_BLOCK_LEN + 11235, second is unpadded
        self.checkCHK(blocks[4][2][1], blocks[4][1], blocks[4][1],
                      bytes(blocks[4][1], offset))

        # Save info for use in request testing
        SHARED_STATE['FILE_BLOCKS'] = blocks
        SHARED_STATE['OFFSET'] = offset
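
Two details worth noting. The checkCHK calls encode the redundancy scheme: data shorter than FREENET_BLOCK_LEN gets a one-byte-padded second copy so the two inserts yield distinct CHKs, data of exactly FREENET_BLOCK_LEN appears padded in both copies, and multi-block data is inserted unpadded twice. Also, the test data comes from a bytes(length, offset) helper that is not shown in this snippet; a plausible reconstruction (a cyclic byte pattern shifted by offset, so each run exercises different CHKs):

# Hypothetical reconstruction of the test helper used above.
def bytes(length, offset):
    """ Return length bytes of a repeating pattern starting at offset. """
    return ''.join(chr((index + offset) % 256)
                   for index in range(length))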
Example #8
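This snippet begins inside a try block; the omitted preamble presumably builds the callbacks and polled socket much as make_state_machine() does in Example #6. A hedged sketch of what precedes the fragment (the VERBOSITY key and its default are assumptions):

    # Hypothetical preamble; the excerpt begins mid-function.
    callbacks = UICallbacks(ui_)
    callbacks.verbosity = params.get('VERBOSITY', 1)  # assumed key/default
    try:
        # Knows about reading and writing bytes.
        async_socket = PolledSocket(params['FCP_HOST'], params['FCP_PORT'])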
        connection = FCPConnection(async_socket, True,
                                   callbacks.connection_state)
    except socket.error, err:  # Not an IOError until 2.6.
        ui_.warn("Connection to FCP server [%s:%i] failed.\n" %
                 (params['FCP_HOST'], params['FCP_PORT']))
        raise err
    except IOError, err:
        ui_.warn("Connection to FCP server [%s:%i] failed.\n" %
                 (params['FCP_HOST'], params['FCP_PORT']))
        raise err

    runner = RequestRunner(connection, params['N_CONCURRENT'])

    if repo is None:
        # For incremental archives.
        ctx = ArchiveUpdateContext()
        update_sm = ArchiveStateMachine(runner, ctx)
    else:
        # For Infocalypse repositories
        ctx = UpdateContext(None)
        ctx.repo = repo
        ctx.ui_ = ui_
        ctx.bundle_cache = cache
        update_sm = UpdateStateMachine(runner, ctx)

    update_sm.params = params.copy()
    update_sm.transition_callback = callbacks.transition_callback
    update_sm.monitor_callback = callbacks.monitor_callback

    # Modify only after copy.
    update_sm.params['FREENET_BUILD'] = runner.connection.node_hello[1][