Example 1
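Verifies that the damage took effect: every node must now fail the content comparison, and any damaged nodes are logged.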
 def _verify_damage( self, nodes ):
     for node in nodes:
         self.assertFalse( self._content_matches( node ), 'Failed to damage AU file: %s' % node.url )
     if nodes:
         log.info( 'Damaged the following node(s) on client %s:\n\t\t\t%s' % ( self.victim, '\n\t\t\t'.join( str( node ) for node in nodes ) ) )
     else:
         log.info( 'No nodes damaged on client %s' % self.victim )
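
The _content_matches helper used above does not appear among these excerpts. A minimal sketch of the idea, with illustrative accessor names (getNodeContent and expectedContent are assumptions, not the real lockss_daemon API):

 def _content_matches( self, node ):
     # Hypothetical sketch: compare what the victim cache currently holds for
     # this node against the content the simulated AU is expected to contain.
     # Both accessors are illustrative assumptions.
     return self.victim.getNodeContent( node ) == self.expectedContent( node )
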
Example 2
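A _verify_damage variant for deletion damage: checks that each node is gone from the victim, then logs the deletions.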
 def _verify_damage(self, nodes):
     for node in nodes:
         self._ensure_victim_node_deleted(node)
     log.info(
         "Deleted the following node(s) on client %s:\n\t\t\t%s"
         % (self.victim, "\n\t\t\t".join(str(node) for node in nodes))
     )
Example 3
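Damages an AU by simulating total disk failure: back up the victim's configuration, stop its daemon, wipe the cache, and write a TitleDB entry so the restored AU will be marked 'publisher down' before restarting.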
    def _damage_AU( self ):
        nodes = self.victim.getAuNodesWithContent( self.AU )
        log.info( 'Backing up cache configuration on victim cache...' )
        self.victim.backupConfiguration()
        log.info( 'Backed up successfully' )

        self.victim.daemon.stop()
        log.info( 'Stopped daemon running on UI port %s' % self.victim.port )
        self.victim.simulateDiskFailure()
        log.info( 'Deleted entire contents of cache on stopped daemon' )

        # Write a TitleDB entry for the simulated AU so it will be marked 'publisher down' when restored.
        self.framework.appendLocalConfig( { 'org.lockss.auconfig.allowEditDefaultOnlyParams': True,
                                            'org.lockss.title.sim1.journalTitle': 'Simulated Content',
                                            'org.lockss.title.sim1.param.1.key': 'root',
                                            'org.lockss.title.sim1.param.1.value': 'simContent',
                                            'org.lockss.title.sim1.param.2.key': 'depth',
                                            'org.lockss.title.sim1.param.2.value': 0,
                                            'org.lockss.title.sim1.param.3.key': 'branch',
                                            'org.lockss.title.sim1.param.3.value': 0,
                                            'org.lockss.title.sim1.param.4.key': 'numFiles',
                                            'org.lockss.title.sim1.param.4.value': 30,
                                            'org.lockss.title.sim1.param.pub_down.key': 'pub_down',
                                            'org.lockss.title.sim1.param.pub_down.value': True,
                                            'org.lockss.title.sim1.plugin': 'org.lockss.plugin.simulated.SimulatedPlugin',
                                            'org.lockss.title.sim1.title': 'Simulated Content: simContent' }, self.victim )
        time.sleep( 5 ) # Settling time

        self.victim.daemon.start()
        # Wait for the client to come up
        self.assert_( self.victim.waitForDaemonReady(), 'Daemon is not ready' )
        log.info( 'Started daemon running on UI port %s' % self.victim.port )

        return nodes
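
The 'pub_down' entry is the point of the TitleDB block: Example 10 later asserts isPublisherDown on the restored AU, and marking the publisher down presumably keeps the daemon from recrawling the (nonexistent) simulated publisher, so the wiped content must come back via V3 repairs from peers.
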
Example 4
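Scrapes the Tiny UI admin page and asserts that it reports a configuration-load failure matching the expected pattern.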
 def runTest( self ):
     HTML = self.tinyUiClient.getAdminUi().read()
     pattern = r'This LOCKSS box \(.*\) has not started because it is unable to load configuration data'
     self.assert_( re.search( pattern, HTML, re.DOTALL | re.MULTILINE ), 'No match for "%s" in\n%s' % ( pattern, HTML ) )
     pattern = "Shouldn't happen"
     self.assertFalse( re.search( pattern, HTML, re.DOTALL | re.IGNORECASE | re.MULTILINE ), 'Unexpected match for "%s"' % pattern )
     self.assert_( re.search( self.expected_pattern, HTML, re.DOTALL | re.MULTILINE ), 'No match for "%s" in\n%s' % ( self.expected_pattern, HTML ) )
     log.info( 'Found "%s"' % self.expected_pattern )
Example 5
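Shortens the victim's top-level V3 poll interval and, when requested, bounces the AU through a deactivate/reactivate cycle.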
 def _update_configuration( self ):
     self.framework.appendLocalConfig( { 'org.lockss.poll.v3.toplevelPollInterval': 10 }, self.victim )
     self.victim.reloadConfiguration()
     if self.toggle_AU_activation:
         log.info( 'Deactivating AU' )
         self.victim.deactivateAu( self.AU )
         log.info( 'Reactivating AU' )
         self.victim.reactivateAu( self.AU )
Example 6
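Runs the post-damage checks: wait for the V3 repair, then verify the repair, the poll results, and the voter agreements and counts.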
 def _check_v3_result( self, nodes ):
     log.info( 'Waiting for V3 repair...' )
     self._await_repair( nodes )
     self._verify_repair( nodes )
     self._verify_poll_results()
     self._verify_voter_agreements()
     self._verify_voters_counts()
     log.info( 'AU successfully repaired' )
Example 7
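setUp for the Tiny UI tests: starts the framework and blocks until the first client accepts connections.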
    def setUp( self ):
        LockssTestCases.setUp( self )
        self._start_framework()

        # Block return until all clients are ready to go.
        log.info( 'Waiting for framework to become ready' )
        self.tinyUiClient = self.clients[ 0 ]
        time.sleep( 2 )
        self.tinyUiClient.waitForCanConnectToHost( sleep = 2 )
Example 8
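Damages the AU by creating between minimum and maximum random extra nodes, then adjusts the expected agreement accordingly.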
 def _create_AU_nodes(self, minimum, maximum):
     # Damage the AU by creating extra nodes
     nodes = self.victim.randomCreateRandomNodes(self.AU, minimum, maximum)
     log.info(
         "Created the following nodes on client %s:\n\t\t\t%s"
         % (self.victim, "\n\t\t\t".join(str(node) for node in nodes))
     )
     self._set_expected_agreement_from_extra(nodes)
     return nodes
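
The _set_expected_agreement_from_extra helper (also used in Examples 26 and 27) is not shown in these excerpts. A plausible sketch under the assumption that extra nodes dilute agreement and that the simulated file count is available (both the key and the formula are assumptions):

 def _set_expected_agreement_from_extra( self, extra_nodes ):
     # Hypothetical sketch: with F original URLs and E extras, the victim
     # can agree on at most F of F + E URLs.
     file_count = self.simulated_AU_parameters[ 'numFiles' ]  # assumed key
     expected = 100.0 * file_count / ( file_count + len( extra_nodes ) )
     self.expected_agreement = '%.2f' % expected  # matches the '100.00' format seen in Example 23
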
Example 9
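The basic damage-and-repair scenario: set up the AU, damage it, verify the damage, enable the victim's poller, then wait for a V3 poll and check the result.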
    def runTest( self ):
        self._setup_AU()
        nodes = self._damage_AU()
        self._verify_damage( nodes )
        # enable polling
        self._enableVictimPoller()

        log.info( 'Waiting for a V3 poll to be called...' )
        self.assert_( self.victim.waitForV3Poller( self.AU ), 'Timed out while waiting for V3 poll' )
        log.info( 'Successfully called a V3 poll' )
        self._check_v3_result( nodes )
Example 10
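Damage verification for the disk-failure case: the AU must be gone; after the configuration backup is restored it must reappear, marked 'publisher down'.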
    def _verify_damage( self, nodes ):
        self.assertFalse( self.victim.hasAu( self.AU ), 'AU still intact' )

        # Restore the backup file
        log.info( 'Restoring cache configuration...' )
        self.victim.restoreConfiguration( self.AU )
        log.info( 'Restored successfully' )

        # These should be equal AU IDs, so both should return true
        self.assert_( self.victim.hasAu( self.AU ) )
        self.assert_( self.victim.isPublisherDown( self.AU ) )
Example 11
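The scenario of Example 9 with the victim's poller left at its defaults.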
    def runTest(self):
        self._setup_AU()
        # disable polling?
        nodes = self._damage_AU()
        self._verify_damage(nodes)
        # enable polling?

        log.info("Waiting for a V3 poll to be called...")
        self.assert_(self.victim.waitForV3Poller(self.AU), "Timed out while waiting for V3 poll")
        log.info("Successfully called a V3 poll")
        self._check_v3_result(nodes)
Example 12
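Verifies a partial repair by counting: some, but not all, of the damaged nodes must have been repaired.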
 def _verify_repair( self, nodes ):
     # XXX need to identify files not count them
     node_count = 0
     repair_count = 0
     for node in nodes:
         node_count += 1
         if self.victim.isAuNode( self.AU, node.url ) and self._content_matches( node ):
             repair_count += 1
     log.info( '%i of %i nodes repaired' % ( repair_count, node_count ) )
     # There is a small chance that the following tests will generate
     # a false negative, about the chance of tossing 120 heads in a row
     self.assert_( repair_count > 0 )
     self.assert_( repair_count < node_count )
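
The pair of assertions can only fail spuriously if every damaged node, or none of them, happens to be repaired; with around 120 nodes each repaired (or not) roughly independently and equiprobably, that chance is about 2 * (1/2)^120, the coin-toss odds the comment alludes to.
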
Example 13
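Extends the damage check: the peer that lacks the AU must be remembered as such and must not be invited into the victim's second poll.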
    def _verify_damage( self, nodes ):
        V3TestCases._verify_damage( self, nodes )
        
        log.debug( 'victim.getNoAuPeers( self.AU ): %s' % self.victim.getNoAuPeers( self.AU ) )
        self.assertEqual( self.victim.getNoAuPeers( self.AU ), [ self.client_without_AU.getV3Identity() ], 'Peer without AU disappeared!' )

        log.info( 'Waiting for a V3 poll to be called...' )
        # Ignores first victim poll
        self.assert_( self.victim.waitForV3Poller( self.AU, [ self.victim_first_poll_key ] ), 'Timed out while waiting for V3 poll' )
        log.info( 'Successfully called a V3 poll' )

        victim_second_poll_key = self.victim.getV3PollKey( self.AU, self.victim_first_poll_key )
        log.debug( "Victim's second poll key: " + victim_second_poll_key )
        invitees = self.victim.getV3PollInvitedPeers( victim_second_poll_key )
        log.debug( 'invitedPeers: %s' % invitees )
        self.assertFalse( self.client_without_AU.getV3Identity() in invitees, 'Peer without AU invited in 2nd poll' )
Example 14
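Common tearDown: fail on any detected deadlock, optionally pause for inspection, then stop the framework.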
    def tearDown( self ):
        # Dump threads and look for deadlocks (independent of success)
        deadlockLogs = self.framework.checkForDeadlock()
        if deadlockLogs:
            log.error( 'Deadlocks detected!' )
            self.fail( 'Failing due to deadlock detection.  Check the following log file(s): ' + ', '.join( deadlockLogs ) )
        else:
            log.info( 'No deadlocks detected' )

        if self.delayShutdown:
            raw_input( '>>> Delaying shutdown.  Press Enter to continue...' )

        log.info( 'Stopping framework' )
        self.framework.stop()
        self.failIf( self.framework.isRunning, 'Framework did not stop' )

        unittest.TestCase.tearDown( self )
Example 15
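The tearDown of Example 14, differently formatted.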
    def tearDown(self):
        # Dump threads and look for deadlocks (independent of success)
        deadlockLogs = self.framework.checkForDeadlock()
        if deadlockLogs:
            log.error("Deadlocks detected!")
            self.fail("Failing due to deadlock detection.  Check the following log file(s): " + ", ".join(deadlockLogs))
        else:
            log.info("No deadlocks detected")

        if self.delayShutdown:
            raw_input(">>> Delaying shutdown.  Press Enter to continue...")

        log.info("Stopping framework")
        self.framework.stop()
        self.failIf(self.framework.isRunning, "Framework did not stop")

        unittest.TestCase.tearDown(self)
Example 16
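setUp for the V3 tests: designates victim and non-victim clients, gives each peer its V3 identity and initial peer list, enables the poller only on the victim, then starts the framework.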
    def setUp( self ):
        LockssTestCases.setUp( self )
        self.victim = self.clients[ 0 ]
        self.nonVictim = self.clients[ 1 ]

        for client in self.clients:
            extraConf = { 'org.lockss.auconfig.allowEditDefaultOnlyParams': True,
                          'org.lockss.id.initialV3PeerList': ';'.join( [ peer.getV3Identity() for peer in self.clients ] + self.offline_peers ),
                          'org.lockss.platform.v3.identity': client.getV3Identity(),
                          'org.lockss.poll.v3.enableV3Poller': client is self.victim,
                          'org.lockss.poll.v3.enableV3Voter': True }
            extraConf.update( self.local_configuration )
            self.framework.appendLocalConfig( extraConf, client )
        self._start_framework()

        # Block return until all clients are ready to go.
        log.info( 'Waiting for framework to become ready' )
        self.framework.waitForFrameworkReady()
Example 17
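Waits for every client to call, and then win, a top-level V3 content poll.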
 def _await_V3_poll_agreement( self ):
     # Expect to see a top level content poll called by all peers
     log.info( 'Waiting for a V3 poll by all simulated caches' )
     for client in self.clients:
         self.assert_( client.waitForV3Poller( self.AU ), 'Never called V3 poll' )
         log.info( 'Client on port %s called V3 poll...' % client.port )
     # Expect each client to have won a top-level v3 poll
     log.info( 'Waiting for all peers to win their polls' )
     for client in self.clients:
         self.assert_( client.waitForWonV3Poll( self.AU, self.timeout ), 'Client on port %s did not win V3 poll' % client.port )
         log.info( 'Client on port %s won V3 poll...' % client.port )
Example 18
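Checks recorded agreement percentages: each non-victim client must show the expected voter agreement with every box, and the victim must show the expected agreement with each voter.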
    def _verify_voter_agreements( self ):
        poll_key = self.victim.getV3PollKey( self.AU )
        for client in self.clients:
            if client != self.victim:
                repairer_info = client.getAuRepairerInfo( self.AU, 'LastPercentAgreement' )
                for ( box, agreement ) in repairer_info.iteritems():
                    self.assertEqual( self.expected_voter_agreement, agreement,
                                      'Client %s wrong agreement %s with box %s' % ( client, agreement, box ) )
                if self.symmetric:
                    log.info( 'Symmetric client %s repairers OK' % client )
                else:
                    log.info( 'Asymmetric client %s repairers OK' % client )

        repairer_info = self.victim.getAuRepairerInfo( self.AU, 'LastPercentAgreement' )
        self.assertEqual( len( self.clients ) - 1, len( repairer_info ) )
        for ( box, agreement ) in repairer_info.iteritems():
            log.debug( 'Client %s box %s agree %s' % ( self.victim, box, agreement ) )
            self.assertEqual( self.expected_agreement, agreement,
                              'Voter %s had actual agreement: %s expected: %s' % ( box, agreement, self.expected_agreement ) )
Example 19
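The disk-failure _damage_AU of Example 3, differently formatted.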
    def _damage_AU(self):
        nodes = self.victim.getAuNodesWithContent(self.AU)
        log.info("Backing up cache configuration on victim cache...")
        self.victim.backupConfiguration()
        log.info("Backed up successfully")

        self.victim.daemon.stop()
        log.info("Stopped daemon running on UI port %s" % self.victim.port)
        self.victim.simulateDiskFailure()
        log.info("Deleted entire contents of cache on stopped daemon")

        # Write a TitleDB entry for the simulated AU so it will be marked 'publisher down' when restored.
        self.framework.appendLocalConfig(
            {
                "org.lockss.auconfig.allowEditDefaultOnlyParams": True,
                "org.lockss.title.sim1.journalTitle": "Simulated Content",
                "org.lockss.title.sim1.param.1.key": "root",
                "org.lockss.title.sim1.param.1.value": "simContent",
                "org.lockss.title.sim1.param.2.key": "depth",
                "org.lockss.title.sim1.param.2.value": 0,
                "org.lockss.title.sim1.param.3.key": "branch",
                "org.lockss.title.sim1.param.3.value": 0,
                "org.lockss.title.sim1.param.4.key": "numFiles",
                "org.lockss.title.sim1.param.4.value": 30,
                "org.lockss.title.sim1.param.pub_down.key": "pub_down",
                "org.lockss.title.sim1.param.pub_down.value": True,
                "org.lockss.title.sim1.plugin": "org.lockss.plugin.simulated.SimulatedPlugin",
                "org.lockss.title.sim1.title": "Simulated Content: simContent",
            },
            self.victim,
        )
        time.sleep(5)  # Settling time

        self.victim.daemon.start()
        # Wait for the client to come up
        self.assert_(self.victim.waitForDaemonReady(), "Daemon is not ready")
        log.info("Started daemon running on UI port %s" % self.victim.port)

        return nodes
Example 20
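A damage-and-repair scenario that waits for the repair inline rather than delegating to _check_v3_result.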
    def runTest( self ):
        self._setup_AU()
        # disable polling?
        nodes = self._damage_AU()
        self._verify_damage( nodes )
        # enable polling?

        log.info( 'Waiting for a V3 poll to be called...' )
        self.assert_( self.victim.waitForV3Poller( self.AU ), 'Timed out while waiting for V3 poll' )
        log.info( 'Successfully called a V3 poll' )

        log.info( 'Waiting for V3 repair...' )
        self._await_repair( nodes )
        self._verify_repair( nodes )
        log.info( 'AU successfully repaired' )
Example 21
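Creates the simulated AU on every client and waits for each to complete its initial crawl.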
 def _setup_AU( self ):
     self.AU = lockss_daemon.Simulated_AU( **self.simulated_AU_parameters )
     log.info( "Creating simulated AU's" )
     for client in self.clients:
         client.createAu( self.AU )
     for client in self.clients:
         client.waitAu( self.AU )
     log.info( "Waiting for simulated AU's to crawl" )
     for client in self.clients:
         self.assert_( client.waitForSuccessfulCrawl( self.AU ), "AU's did not complete initial crawl" )
     log.info( "AU's completed initial crawl" )
Example 22
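Base setUp: logs a banner built from the test docstring, creates the daemon framework, and registers it for cleanup after user interruption.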
 def setUp( self ):
     unittest.TestCase.setUp( self )
     # Log start of test
     log.info( len( self.__doc__ )*'=' )
     log.info( self.__doc__ )
     log.info( len( self.__doc__ )*'-' )
     # Create a framework for the test
     self.framework = lockss_daemon.Framework( self.daemon_count, self.config_URLs, self.start_UI_port )
     # List of daemon clients
     self.clients = self.framework.clientList
     # Enable clean-up after user interruption
     frameworkList.append( self.framework )
Example 23
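A two-poll scenario: the first poll must complete with 100% agreement; the AU is then destroyed and a second poll must repair it.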
 def runTest( self ):
     self._setup_AU()
     self.expected_agreement = '100.00'
     self.expected_voter_agreement = '100.00'
     self._enableVictimPoller()
     # Wait for the first poll to finish
     log.info( 'Waiting for a V3 poll to be called...' )
     self.assert_( self.victim.waitForV3Poller( self.AU ), 'Timed out while waiting for first V3 poll' )
     log.info( 'Successfully called first V3 poll' )
     self.assert_( self.victim.waitForCompletedV3Poll( self.AU ), 'Timed out while waiting for poll to complete' )
     self._verify_poll_results()
     self._verify_voter_agreements()
     self._verify_voters_counts()
     # Destroy the AU
     nodes = self._damage_AU()
     self._verify_damage( nodes )
     log.info( 'Waiting for second V3 poll to be called...' )
     self.assert_( self.victim.waitForV3Poller( self.AU ), 'Timed out while waiting for second V3 poll' )
     log.info( 'Successfully called second V3 poll' )
     # Check the result
     self._await_repair(nodes)
     self._check_v3_result( nodes )
Example 24
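A _check_v3_result variant that waits for the poll to complete and verifies the poll results.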
 def _check_v3_result( self, nodes ):
     log.info( 'Waiting for poll to complete...' )
     self._await_complete( nodes )
     self._verify_poll_results()
     log.info( 'AU successfully repaired' )
Example 25
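Blocks until the victim's V3 poll completes.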
 def _await_complete( self, nodes ):
     log.info( 'Waiting for V3 poll to complete...' )
     self.assert_( self.victim.waitForCompletedV3Poll( self.AU ), 'Timed out while waiting for poll to complete' )
     log.info( 'Poll successfully completed' )
Example 26
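Damages the AU by creating a single extra node.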
 def _damage_AU( self ):
     # Damage the AU by creating an extra node
     node = self.victim.createNode( self.AU, '000extrafile.txt' )
     log.info( 'Created file %s on client %s' % ( node.url, self.victim ) )
     self._set_expected_agreement_from_extra( [ node ] )
     return [ node ]
Example 27
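Damages the AU with an extra node whose URL sorts last among the AU's CachedUrls, exercising an ordering edge case.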
 def _damage_AU( self ):
     # Damage the AU by creating an extra node that should sort LAST in the list of CachedUrls
     node = self.victim.createNode( self.AU, 'zzzzzzzzzzzzz.txt' )
     log.info( 'Created file %s on client %s' % ( node.url, self.victim ) )
     self._set_expected_agreement_from_extra( [ node ] )
     return [ node ]
Example 28
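An _await_repair variant for under-quorum scenarios: the poll is expected to end in No Quorum rather than repair.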
 def _await_repair( self, nodes ):
     log.info( 'Waiting for V3 poll to report no quorum...' )
     self.assert_( self.victim.waitForV3NoQuorum( self.AU ), 'Timed out while waiting for no quorum' )
     log.info( 'AU successfully reported No Quorum' )
Example 29
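An _await_repair that expects V3 to repair the entire AU.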
 def _await_repair( self, nodes ):
     # Expect to see the AU successfully repaired
     log.info( 'Waiting for successful V3 repair of entire AU' )
     self.assert_( self.victim.waitForV3Repair( self.AU, timeout = self.timeout ), 'AU was not repaired by V3' )
Example 30
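A _check_v3_result variant for cases where the repair is expected not to be verified.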
 def _check_v3_result( self, nodes ):
     log.info( 'Waiting for poll to complete...' )
     self._await_complete( nodes )
     self._verify_poll_results()
     log.info( 'AU repair not verified, as expected' )