Example #1
def setup(islandId):
    sor = SOR()
    hosts = sor.getHosts('purge-dispatcherV2')
    if hosts is None or len(hosts) != 1:
        print 'Did not find expected host for purge-dispatcherV2',hosts
        return 1
    purgeHost = hosts.pop()

    mc = ManagementContainer.getInstance()
    purgeDispatcher = Service('purge-dispatcherV2')
    preCalcSvc = Service('purge-precalc-service')
    
    try:
        purgeDispatcher.invoke('stop',purgeHost)
        preCalcSvc.invoke('stop',purgeHost)

        os.system("cd /tmp; /bin/rm -rf /tmp/corpus /tmp/policystats-1; tar -xf endtoend-test-corpus.tar; mv /tmp/corpus /tmp/policystats-1")
        os.system("sed -i 's/@enron.com/@policystats.com/g' /tmp/policystats-1/*")
        
        setupCustomer(mc,islandId,corpus='/tmp/policystats-1',name='PolicyStatsTest',domain='policystats.com',recvDate='now')
        
        return 0
    finally:
        purgeDispatcher.invoke('start',purgeHost)
        preCalcSvc.invoke('start',purgeHost)
Example #2
def enableIslandEDiscovery( islandId, enabled ):

    mc = ManagementContainer.getInstance()
    island = mc.getIslandManager().getIsland( islandId )
    edMode = island.isEdiscoveryEnabled()
    if edMode != enabled:
        island.setEdiscoveryEnabled( enabled )
        mc.getIslandManager().updateIsland(island)

    res = os.system( '/opt/ems/bin/amp cluster -l | grep -v FeedOn | grep -E \" .*' + str( islandId ) + ' .*\" > /tmp/cluster.out')
    if res != 0:
        return ERROR_CLUSTER_FAILED
    linedata = open( '/tmp/cluster.out', 'r' ).read()
    items = linedata.split()
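    # the filtered 'amp cluster -l' row is whitespace-split; the ender service name and proxy host sit at fixed column positions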
    enderServiceName = items[5]

    if island.getPlatformVersion().endswith( '-solr' ):
        enderServiceName = 'solrproxy-' + enderServiceName
    else:
        enderServiceName = 'fastproxy-' + enderServiceName
    proxyHost = items[10]
    print 'restarting proxy \"' + enderServiceName + '\" on \"' + proxyHost + '\" to ensure that island capability cache is in synch'

    proxy = Service( enderServiceName )
    proxy.invoke( 'restart', proxyHost )
    print 'proxy restarted'

    return edMode
Example #3
def prepDataForPurge(mc, custid, purgeHost):
    pm = mc.getRetentionPolicyManager()
    rp = pm.getLongestRetentionPolicy(custid)
    rp.setDaysRetention(0)
    pm.savePolicy(rp)
    syncService = Service("purge-amdb-sync")
    syncService.invoke("restart", purgeHost)
    conn = None
    done = False
    maxTries = 24
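    # poll the AM DB until purge-amdb-sync has written the zero-day retention policy (24 tries of 10s, about 4 minutes)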
    while not done and maxTries > 0:
        stmt = None
        rs = None
        try:
            conn = mc.getDBConnection(ManagementContainer.AM_POOL_NAME)
            stmt = conn.createStatement()
            rs = stmt.executeQuery("select retain_days from retention_policies where policy_id = %d" % rp.getId())
            done = rs is not None and rs.next() and rs.getInt(1) == 0
        finally:
            if conn is not None:
                conn.rollback()
            if rs is not None:
                rs.close()
            if stmt is not None:
                stmt.close()
            if conn is not None:
                mc.returnDBConnection(conn, ManagementContainer.AM_POOL_NAME)
        if not done:
            print "Waiting for retention policy to update on AM DB"
            time.sleep(10)
            maxTries = maxTries - 1
    if maxTries <= 0:
        raise Exception("purge-amdb-sync did not update policies")
Example #4
def setup(islandId):
    sor = SOR()
    hosts = sor.getHosts("purge-dispatcherV2")
    if hosts is None or len(hosts) != 1:
        print "Did not find expected host for purge-dispatcherV2", hosts
        return 1
    purgeHost = hosts.pop()

    mc = ManagementContainer.getInstance()
    purgeDispatcher = Service("purge-dispatcherV2")
    preCalcSvc = Service("purge-precalc-service")

    try:
        purgeDispatcher.invoke("stop", purgeHost)
        preCalcSvc.invoke("stop", purgeHost)

        os.system(
            "cd /tmp; /bin/rm -rf /tmp/corpus /tmp/purge-1; tar -xf endtoend-test-corpus.tar; mv /tmp/corpus /tmp/purge-1"
        )
        os.system("sed -i 's/@enron.com/@purge1.com/g' /tmp/purge-1/*")

        setupCustomer(mc, islandId, corpus="/tmp/purge-1", name="purge1", domain="purge1.com", recvDate="now")

        return 0
    finally:
        purgeDispatcher.invoke("start", purgeHost)
        preCalcSvc.invoke("start", purgeHost)
Example #5
def enableIslandEDiscovery( islandId, enabled ):

    file = File('/tmp/cluster_tmp.out')
    ampout = PrintStream(FileOutputStream(file))
    amptool = ActiveMailboxPartitionTool()
    amptool.setOutputStream(ampout)
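    # 'cluster -l' output is captured to /tmp/cluster_tmp.out via the tool's output stream and filtered for this island below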

    mc = ManagementContainer.getInstance()
    island = mc.getIslandManager().getIsland( islandId )
    edMode = island.isEdiscoveryEnabled()
    if edMode != enabled:
        island.setEdiscoveryEnabled( enabled )
        mc.getIslandManager().updateIsland(island)

    result = amptool.runCommand('cluster',array(['-l'],String))
    cmdline = 'cat /tmp/cluster_tmp.out | grep -v FeedOn | grep -E \" .*' + str( islandId ) + ' .*\" > /tmp/cluster.out'
    os.system(cmdline)
    if result is False:
        return ERROR_CLUSTER_FAILED
    linedata = open( '/tmp/cluster.out', 'r' ).read()
    items = linedata.split()
    enderServiceName = items[5]

    if island.getPlatformVersion().endswith( '-solr' ):
        enderServiceName = 'solrproxy-' + enderServiceName
    else:
        enderServiceName = 'fastproxy-' + enderServiceName
    proxyHost = items[10]
    print 'restarting proxy \"' + enderServiceName + '\" on \"' + proxyHost + '\" to ensure that island capability cache is in synch'

    proxy = Service( enderServiceName )
    proxy.invoke( 'restart', proxyHost )
    print 'proxy restarted'

    return edMode
Example #6
def ensureServiceUp(serviceName, hostList):
    svc = Service(serviceName)
    result = svc.invoke('start',hostList)
    for r in result:
#      print result[r]
#      print result[r].isRunning()
      if result[r].isRunning() != 1:
        msg = 'service '+ serviceName+ ' failed to start on host '+ r
        print msg
        raise Exception(msg)
Example #7
def enderStop(hostList, serviceName):
    print 'stopping service', serviceName,'on', hostList
    svc = Service(serviceName)
    results = svc.invoke('stop',hostList)
    for r in results :
        result = results[r]
        if result.isRunning() != 0:
          msg = 'service '+ serviceName+ ' failed to stop on host '+ r
          print msg
          raise Exception(msg)
Example #8
def upServices(serviceName, hostList):
    upServers = []
    svc = Service(serviceName)
    result = svc.invoke('status',hostList)
    for r in result:
#      print result[r]
#      print result[r].getRetCode()
#      print result[r].isRunning()
      if result[r].isRunning() == 1 :
        upServers.append(r)
    return upServers
Example #9
def enderStart(hostList, serviceName):
    print 'starting service', serviceName,'on', hostList
    svc = Service(serviceName)
    results = svc.invoke('start',hostList)
    for r in results :
        result = results[r]
        print 'running', result.isRunning()
        if result.isRunning() != 1 :
          msg = 'service '+ serviceName+ ' failed to start on host '+ r
          print msg
          raise Exception(msg)
Example #10
def test(islandId, numMessages, esHosts):
    mc = ManagementContainer.getInstance()
    lastVal = None
    custId = None
    service = Service("storageimporter")
    msgs = None

    try:
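        # seed the message id sequence just below Integer.MAX_VALUE so the imported messages straddle the 32-bit boundary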
        lastVal, nextVal = setMessageId(mc, Integer.MAX_VALUE - 2)
        print "lastVal,nextVal", lastVal, nextVal
        print "restart storageimporters", esHosts
        service.invoke("restart", esHosts)
        custId = setupCustomer(mc, islandId)
        msgs = findMessages(mc, custId, numMessages)
        print "created messages", msgs
        checkSearchStatus(mc, msgs, custId)

        ism = mc.getIndexSearchManager()
        isc = IndexSearchConstraint(None, None)
        isc.sortBy("storageid", True)
        isc.constrainByMinimumStorageId(0)
        isc.constrainByIsland(mc.getIslandManager().getIsland(islandId))
        print "DBG min", 0
        query = "customerid:" + str(custId)
        sr = ism.searchAndWrap(query, isc, CallerApp.INTERNAL)
        if sr.getDocCount() != 10:
            print >> sys.stderr, "Failed to find 10 documents when minStorageId = 0, found", sr.getDocCount()
            return 1
        print "DBG min", Integer.MAX_VALUE - 1
        isc.constrainByMinimumStorageId(Integer.MAX_VALUE - 1)
        sr = ism.searchAndWrap(query, isc, CallerApp.INTERNAL)
        if sr.getDocCount() != 9:
            print >> sys.stderr, "Failed to find 9 documents when minStorageId = ", Integer.MAX_VALUE - 1, ",found", sr.getDocCount()
            return 1
        print "DBG min", Integer.MAX_VALUE + 1
        isc.constrainByMinimumStorageId(Integer.MAX_VALUE + 1)
        sr = ism.searchAndWrap(query, isc, CallerApp.INTERNAL)
        if sr.getDocCount() != 7:
            print >> sys.stderr, "Failed to find 7 documents when minStorageId = ", Integer.MAX_VALUE + 1, ",found", sr.getDocCount()
            return 1
    finally:
        if msgs is not None:
            for msg in msgs:
                purgeMessage(mc, msg, custId)
        if custId is not None:
            mc.getCustomerManager().deleteCustomers([custId])
        if lastVal is not None:
            lastVal, nextVal = setMessageId(mc, lastVal)
            print "lastVal,nextVal", lastVal, nextVal
            print "restart storageimporters", esHosts
            service.invoke("restart", esHosts)
    return 0
Example #11
def runit(action, svc_name, system_name):

    svc = Service(svc_name)

    r = svc.invoke(action, system_name)

    status = "error (pid 0)"

    if r.isFailed() :
        status = "error (pid 0)"
    elif r.isRunning() :
        status = "run (pid x)"
    else :
        status = "down (pid 0)"

    print system_name, svc_name, status
Example #12
def upgradeMaster(islandID):

    clusterMgr = mc.getClusterManager()
    clusters = clusterMgr.enumClusters(islandID)
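    # per cluster: stop Solr on the master hosts, back up and upgrade the index via ender remote-actions, then remove the old Solr service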
    for cluster in clusters:
        locs = clusterMgr.enumClusterLocations(cluster.getID())
        hosts = []
        for loc in locs:
            masterURL = URL(loc.getClusterLocationProperty(SolrClusterAdapter.SOLR_MASTER_HOST_URL_PROP))
            masterHost = masterURL.getHost()
            hosts.append(masterHost)

        service = Service("ems-solr")
        service.setThreadCount(1)
        service.invoke("stop", hosts)

        failures = runJob(["/usr/local/bin/ender", "remote-action", "upgrade-support", "save-index"], hosts)
        if not failures.isEmpty():
            print "WARN: Failed to backup the index on all hosts. Check logs.", failures

        failures = runJob(["/usr/local/bin/ender", "remote-action", "upgrade-support", "upgrade-index"], hosts)
        if not failures.isEmpty():
            print "Failed to start the backup job running on all hosts. Check logs.", failures
            return 1

        failures = runJob(
            ["/usr/local/bin/ender", "remote-action", "upgrade-support", "upgrade-status"],
            hosts,
            sleepTime=120,
            maxWaits=43200,
            interimResult=3,
        )
        if not failures.isEmpty():
            print "ERROR: upgrade job failed. Upgrade aborted", failures
            return 1

        failures = runJob(["/usr/local/bin/ender", "remote-action", "upgrade-support", "remove-solr-service"], hosts)
        if not failures.isEmpty():
            print "ERROR: unable to remove solr service from all hosts", failures
            return 1

    print "Please check for any errors in the output before proceeding"
    print "If it appears the Solr index has been upgraded successfully. Please proceed to install the new version of Solr and then run the validate-master step"

    return 0
Example #13
def setup(islandId,numMessages):
    mc = ManagementContainer.getInstance()
    svc = Service('storageimporter')
    
    # remote test
    setPartitionsWriteable(mc,True,True)   # local readonly
    setPartitionsWriteable(mc,False,False) # remote writeable
    svc.invoke('restart','localhost')
    try:
        os.system("cd /tmp; /bin/rm -rf /tmp/corpus /tmp/rgm-1; tar -xf endtoend-test-corpus.tar; mv /tmp/corpus /tmp/rgm-1");
        os.system("sed -i 's/@enron.com/@rgm1.com/g' /tmp/rgm-1/*");
        custId = setupCustomer(mc,islandId,corpus='/tmp/rgm-1',name='rgm-remote',domain='rgm1.com',recvDate='now')
        msgs = findMessages(mc,custId,numMessages)
        partId = msgs.iterator().next().getPartitionId()
        partition = mc.getPartitionManager().getPartition(partId)
        if partition.isLocal():
            print 'Expected messages to be stored remotely but found message on partition',partId
            return 1         
    finally:
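        # restore local and remote partitions to writeable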
        setPartitionsWriteable(mc,False,True)
        setPartitionsWriteable(mc,False,False)
    
    # local test
    setPartitionsWriteable(mc,False,True) # local writeable
    setPartitionsWriteable(mc,True,False) # remote readonly
    svc.invoke('restart','localhost')
    try:
        os.system("cd /tmp; /bin/rm -rf /tmp/corpus /tmp/rgm-2; tar -xf endtoend-test-corpus.tar; mv /tmp/corpus /tmp/rgm-2");
        os.system("sed -i 's/@enron.com/@rgm2.com/g' /tmp/rgm-2/*");
        custId = setupCustomer(mc,islandId,corpus='/tmp/rgm-2',name='rgm-local',domain='rgm2.com',recvDate='now')
        msgs = findMessages(mc,custId,numMessages)
        partId = msgs.iterator().next().getPartitionId()
        partition = mc.getPartitionManager().getPartition(partId)
        if not partition.isLocal():
            print 'Expected messages to be stored locally but found message on partition',partId
            return 1        
    finally: 
        setPartitionsWriteable(mc,False,True)
        setPartitionsWriteable(mc,False,False)
    return 0
Example #14
def stopPurge(purgeHost, cleanup = True):
    purgeDispatcher = Service('purge-dispatcherV2')
    preCalcSvc = Service('purge-precalc-service')

    purgeDispatcher.invoke('stop',purgeHost)
    preCalcSvc.invoke('stop',purgeHost)

    if cleanup:
        for f in os.listdir('/ems/shared/purge'):
            parent = os.path.join('/ems/shared/purge',f)
            print 'checking path:', parent
            sentinel = os.path.join(parent,'sentinel')
            if os.path.exists(sentinel):
                print 'removing:', sentinel
                os.remove(sentinel)
Example #15
def startCache(cacheHost):
    cacheHandler = Service('voltdb')
    cacheHandler.invoke('start',cacheHost)
    print time.asctime(),'Cache on host '+ cacheHost + ' has started'
Example #16
def fullStatus(islandID):
    islandMgr = mc.getIslandManager()
    island = islandMgr.getIsland(int(islandID))
    print "INFO: island status for island", islandID
    print "INFO: search URL", islandID, island.getSearchURL()
    print "INFO: search Parmeters", islandID, island.getSearchParms()

    print "INFO: cluster and shard status for island", islandID
    clusterMgr = mc.getClusterManager()
    clusters = clusterMgr.enumClusters(islandID)
    masters = []
    slaves = []
    masterURLs = []
    slaveURLs = []
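    # collect master/slave hosts and their /admin/system URLs for the service status and version checks below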
    for cluster in clusters:
        print "INFO: cluster", cluster.getID(), "F:", cluster.isFeedEnabled(), "U:", cluster.isUpdateEnabled(), "P:", cluster.isPurgeEnabled()
        locs = clusterMgr.enumClusterLocations(cluster.getID())
        for loc in locs:
            print "INFO: location", loc.getId(), "F:", loc.isFeedEnabled(), "U:", loc.isUpdateEnabled(), "P:", loc.isPurgeEnabled(), "M:", loc.isManagementEnabled()
            slaveURL = URL(loc.getClusterLocationProperty(SolrClusterAdapter.SOLR_SLAVE_HOST_URL_PROP))
            slaveHost = slaveURL.getHost()
            slaves.append(slaveHost)
            masterURL = URL(loc.getClusterLocationProperty(SolrClusterAdapter.SOLR_MASTER_HOST_URL_PROP))
            masterHost = masterURL.getHost()
            masters.append(masterHost)
            masterURL = masterURL.toString() + "/admin/system"
            masterURLs.append(masterURL)
            slaveURL = slaveURL.toString() + "/admin/system"
            slaveURLs.append(slaveURL)

    print "INFO: Service Status for island", islandID
    service = Service("ems-solr")
    service.setThreadCount(1)
    responses = service.invoke("status", masters)
    for key in responses.keySet():
        sr = responses.get(key)
        print "INFO: Master", key, sr.getStdout()
    responses = service.invoke("status", slaves)
    for key in responses.keySet():
        sr = responses.get(key)
        print "INFO: Slave", key, sr.getStdout()

    print "INFO: Version Info for island", islandID
    for masterURL in masterURLs:
        masterF = None
        try:
            masterF = urllib.urlopen(masterURL)
        except:
            print "WARN: Solr not running on", masterURL
            continue
        try:
            masterData = masterF.read()
            look = '<str name="solr-spec-version">'
            idx = masterData.find(look)
            idx = idx + len(look)
            end = masterData.find("</str>", idx)
            print "INFO:", masterURL, masterData[idx:end]
        finally:
            masterF.close()

    for slaveURL in slaveURLs:
        slaveF = None
        try:
            slaveF = urllib.urlopen(slaveURL)
        except:
            print "WARN: Solr not running on", slaveURL
            continue
        try:
            slaveData = slaveF.read()
            look = '<str name="solr-spec-version">'
            idx = slaveData.find(look)
            idx = idx + len(look)
            end = slaveData.find("</str>", idx)
            print "INFO:", slaveURL, slaveData[idx:end]
        finally:
            slaveF.close()

    print "INFO: Upgrade activities"
    ulog = "/tmp/.solr_upgrade_log_" + str(islandID)
    if os.path.exists(ulog):
        for line in open(ulog):
            print "INFO:", line.strip()
    else:
        print "INFO: No activity log found:", ulog
Example #17
def upgradeSlave(islandID):
    clusterMgr = mc.getClusterManager()
    clusters = clusterMgr.enumClusters(islandID)
    hosts = []
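    # for each location: ask the slave's m1Monitor to shut down, hit the master's register command, and remember the slave host for later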
    for cluster in clusters:
        locs = clusterMgr.enumClusterLocations(cluster.getID())
        for loc in locs:
            slaveURL = URL(loc.getClusterLocationProperty(SolrClusterAdapter.SOLR_SLAVE_HOST_URL_PROP))
            slaveHost = slaveURL.getHost()
            masterURL = URL(loc.getClusterLocationProperty(SolrClusterAdapter.SOLR_MASTER_HOST_URL_PROP))
            masterURL = masterURL.toString() + "/m1Monitor?command=register"
            slaveURL = slaveURL.toString()
            slaveURL = slaveURL + "/m1Monitor?command=shutdown"
            slaveF = None
            try:
                slaveF = urllib.urlopen(slaveURL)
            except:
                print "WARN: slave down, expected up", slaveURL
            if slaveF is not None:
                logOutput(slaveF)
                slaveF.close()
            try:
                masterF = urllib.urlopen(masterURL)
            except:
                print "ERROR: master down, must be up", masterURL
                return 1
            logOutput(masterF)
            masterF.close()
            hosts.append(slaveHost)

    islandMgr = mc.getIslandManager()
    retries = 5
    done = False
    while not done and retries > 0:
        done = True
        island = islandMgr.getIsland(islandID)
        parms = island.getSearchParms()
        for cluster in clusters:
            locs = clusterMgr.enumClusterLocations(cluster.getID())
            for loc in locs:
                slaveURL = URL(loc.getClusterLocationProperty(SolrClusterAdapter.SOLR_SLAVE_HOST_URL_PROP)).toString()
                if slaveURL in parms:
                    print slaveURL, "still in search params, waiting for update"
                    done = False
        if not done:
            # brief pause (interval arbitrary) so the search params have a chance to update before re-checking
            time.sleep(10)
        retries = retries - 1

    if not done and retries == 0:
        print "Unable to start slave upgrade because search URL does not show masters handling all search requests"
        return 1

    print "Stopping Solr on slaves"
    service = Service("ems-solr")
    service.setThreadCount(1)
    service.invoke("stop", hosts)

    print "Removing Solr service from slaves to prepare for upgrade"
    failures = runJob(["/usr/local/bin/ender", "remote-action", "upgrade-support", "remove-solr-service"], hosts)

    if failures.isEmpty():
        print "INFO: Search is now successfully running on masters, please use Extropy to upgrade slaves"
    else:
        print "WARN: check logs for errors before upgrading slaves. Host(s) reporting falilure:", failures

    return 0
Example #18
def test(sourceIslandId,targetIslandId,numMessages):
    rc = 1
    mc = ManagementContainer.getInstance()
    cm = mc.getCustomerManager()

    srcIsland = mc.getIslandManager().getIsland(sourceIslandId)
    tgtIsland = mc.getIslandManager().getIsland(targetIslandId)
    tgtEDMode = None
    custid = None
    proxy = Service('solrproxy-Island102Cluster1')


    try:
        tgtEDMode = tgtIsland.isEdiscoveryEnabled()

        # turn on e-discovery on target island
        tgtIsland.setEdiscoveryEnabled(True)
        mc.getIslandManager().updateIsland(tgtIsland)
        print 'restarting proxy to ensure that island capability cache is in synch'
        proxy.invoke('restart','work-3')
        print 'proxy restarted'

        # setup customer
        custid = setupCustomer(mc,sourceIslandId)
        caps = mc.getCustomerManager().getCustomerCapabilities(int(custid))
        print custid,'ediscovery is',caps.getBooleanCapability(Capabilities.CAP_ALLOW_ARCHIVE_EDISCOVERY)

        rc = testMigrate(custid,sourceIslandId,targetIslandId,numMessages)
        
        if rc != 0:
            return rc

        # delete customer
        cm.deleteCustomers([custid])
        custid = None

        # stop the storage importer
        storageImporter = Service('storageimporter')
        storageImporter.invoke('stop','localhost')

        # turn off e-discovery on target island
        tgtEDMode = tgtIsland.isEdiscoveryEnabled()
        tgtIsland.setEdiscoveryEnabled(False)
        mc.getIslandManager().updateIsland(tgtIsland)
        print 'restarting proxy to ensure that island capability cache is in synch'
        proxy.invoke('restart','work-3')
        print 'proxy restarted'

        # create a customer, do not yet import mail
        custid = setupCustomer(mc,sourceIslandId)

        # turn off ediscovery for customer
        caps = mc.getCustomerManager().getCustomerCapabilities(int(custid))
        caps.setBooleanCapability(Capabilities.CAP_ALLOW_ARCHIVE_EDISCOVERY,False)
        mc.getCustomerManager().saveCustomerCapabilities(caps)
        caps = mc.getCustomerManager().getCustomerCapabilities(int(custid))
        print custid,'ediscovery is',caps.getBooleanCapability(Capabilities.CAP_ALLOW_ARCHIVE_EDISCOVERY)

        # import the messages after change to customer caps
        storageImporter.invoke('start','localhost')

        rc = testMigrate(custid,sourceIslandId,targetIslandId,numMessages)
    finally:
        if custid is not None:
            cm.deleteCustomers([custid])
        if tgtIsland is not None and tgtEDMode is not None and tgtIsland.isEdiscoveryEnabled() != tgtEDMode:
            tgtIsland.setEdiscoveryEnabled(tgtEDMode)
            mc.getIslandManager().updateIsland(tgtIsland)
        print 'restarting proxy to ensure that island capability cache is in synch'
        proxy.invoke('restart','work-3')
        print 'proxy restarted'
    
    return rc
Example #19
def startSearchDuplicateCleanup(searchDuplicateCleanupHost):
    searchDuplicateCleanup = Service('search-duplicate-cleanup')
    searchDuplicateCleanup.invoke('start',searchDuplicateCleanupHost)
Example #20
def test(islandId, numMessages, custName):
    sor = SOR()
    hosts = sor.getHosts("purge-dispatcherV2")
    if hosts is None or len(hosts) != 1:
        print "Did not find expected host for purge-dispatcherV2", hosts
        sys.exit(1)
    purgeHost = hosts.pop()

    rc = True
    mc = ManagementContainer.getInstance()
    cm = mc.getCustomerManager()
    amsm = mc.getActiveMailboxStoreManager()
    purgeDispatcher = Service("purge-dispatcherV2")
    preCalcSvc = Service("purge-precalc-service")

    purgeDispatcher.invoke("stop", purgeHost)
    preCalcSvc.invoke("stop", purgeHost)

    custid = findCustomer(custName)
    if custid < 0:
        print "test failed because customer", custName, "was not found"
        return False

    prepDataForPurge(mc, custid, purgeHost)

    preCalcSvc.invoke("start", purgeHost)

    msgs = findMessages(mc, custid, numMessages)

    msg = selectMessage(mc, msgs)

    purgeMessage(mc, msg, custid)

    partition = mc.getPartitionManager().getPartition(msg.getPartitionId())
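    # look up the message's partition, then confirm the purge removed it from the active mailbox store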

    tm = amsm.findMessages(
        [
            SearchConstraint(
                IActiveMailboxStoreManager.PROP_CUST_ID, SearchConstraintOperator.CONSTRAINT_EQUALS, int(custid)
            ),
            SearchConstraint(
                IActiveMailboxStoreManager.PROP_MSG_ID, SearchConstraintOperator.CONSTRAINT_EQUALS, msg.getMessageId()
            ),
        ],
        partition,
    )

    if not tm.isEmpty():
        print "purge failed to delete message", msg
        sys.exit(1)

    for p in mc.getPartitionManager().listPartitions():
        amsm.purgeRequest(p)

    now = Date()

    purgeDispatcher.invoke("start", purgeHost)

    done = checkLastPurge(mc, now)

    if done:
        for msg in msgs:
            partition = mc.getPartitionManager().getPartition(msg.getPartitionId())
            tm = amsm.findMessages(
                [
                    SearchConstraint(
                        IActiveMailboxStoreManager.PROP_CUST_ID, SearchConstraintOperator.CONSTRAINT_EQUALS, int(custid)
                    ),
                    SearchConstraint(
                        IActiveMailboxStoreManager.PROP_MSG_ID,
                        SearchConstraintOperator.CONSTRAINT_EQUALS,
                        msg.getMessageId(),
                    ),
                ],
                partition,
            )
            if not tm.isEmpty():
                print "purge failed to delete", msg
                rc = False
    else:
        rc = False

    if rc is True:
        print "purge successful"
    else:
        print "purge failed"

    # exit status 0 on success, 1 on failure (sys.exit(False) exits 0)
    sys.exit(rc is not True)
Example #21
def startPurge(purgeHost):
    purgeDispatcher = Service('purge-dispatcherV2')
    preCalcSvc = Service('purge-precalc-service')

    purgeDispatcher.invoke('start',purgeHost)
    preCalcSvc.invoke('start',purgeHost)
Example #22
def test(islandId):
  global purgeClient
  global preCalcSvc
  global purgeDispatcher
  global purgeHost
  
  sor = SOR()
  hosts = sor.getHosts('purge-dispatcherV2')
  if hosts is None or len(hosts) != 1:
    print 'Did not find expected host for purge-dispatcherV2',hosts
    sys.exit(1)
  purgeHost = hosts.pop()

  # ran into some shell issues. this apparently fixes it
  os.system("cd")

  rc = False
  cm = mc.getCustomerManager()
  purgeDispatcher = Service('purge-dispatcherV2')
  preCalcSvc = Service('purge-precalc-service')
  purgeClient = Service('purge-client')

  debug("test: killing precalc and purge-dispatcher services on " + purgeHost + " and purge on local machine")
  purgeDispatcher.invoke('stop',purgeHost)
  preCalcSvc.invoke('stop',purgeHost)
  purgeClient.invoke('stop',localhost)
  if not os.path.exists(sentinelSpoolDir): os.makedirs(sentinelSpoolDir)

  try:
    debug("test: creating new customer")

    # order of test cases (all have valid sentinel file in shared - do not change order!):
    #   valid batch in shared directory, completely corrupt copy in bigdisk
    #   valid batch in shared directory, no copy in bigdisk
    #   completely corrupted batch in shared directory, completely corrupt copy in bigdisk
    #   completely corrupted batch in shared directory, no copy in bigdisk
    #   no batch in shared directory, no copy in bigdisk
    #   valid batch in shared, no reading permissions copy in bigdisk
    #   partially valid batch in shared, partially valid copy in bigdisk
    #   partially valid batch in shared, completely corrupt copy in bigdisk
    #   partially valid batch in shared, no copy in bigdisk
    #   valid batch in shared directory, partially valid copy in bigdisk
    #
    #   regression should be done in last test case!


    rc = genericTest()
    if rc: rc = genericTest(shouldDeleteSpoolBatch=True)
    if rc: rc = genericTest(shouldMuckUpWorkBatch=True)
    if rc: rc = genericTest(shouldMuckUpWorkBatch=True, shouldDeleteSpoolBatch=True)
    if rc: rc = genericTest(shouldDeleteWorkBatch=True, shouldDeleteSpoolBatch=True, shouldMuckUpBatchFileInSpoolDir=False)
    if rc: rc = genericTest(shouldChangePermissions=True)
    if rc: rc = genericTest(shouldPartialWorkBatch=True, shouldPartialSpoolBatch=True)
    if rc: rc = genericTest(shouldPartialWorkBatch=True, shouldMuckUpWorkBatch=True)
    if rc: rc = genericTest(shouldPartialWorkBatch=True, shouldDeleteSpoolBatch=True)
    if rc: rc = genericTest(shouldPartialSpoolBatch=True)# , shouldDoRegression=True)
        
  finally:
    # clean up
    cleanPurgeDirs()
    debug("test: restarting purge dispatcher, purge, and precalc services")
    purgeDispatcher.invoke('start',purgeHost)
    preCalcSvc.invoke('start',purgeHost)
    purgeClient.invoke('start',localhost)

  debug("test: returning " + str(rc is not True))
  log.close()
  sys.exit(rc is not True)
Example #23
def test(islandId,numMessages,custName):
    sor = SOR()
    hosts = sor.getHosts('purge-dispatcherV2')
    if hosts is None or len(hosts) != 1:
        print 'Did not find expected host for purge-dispatcherV2',hosts
        sys.exit(1)
    purgeHost = hosts.pop()

    mc = ManagementContainer.getInstance()
    cm = mc.getCustomerManager()
    amsm = mc.getActiveMailboxStoreManager()
    preCalcSvc = Service('purge-precalc-service')
    
    preCalcSvc.invoke('stop',purgeHost)
    
    custid = findCustomer(custName)
    if custid < 0:
        print 'test failed because customer',custName,'was not found'
        return False

    clearPolicyStats(custid)

    rpm = mc.getRetentionPolicyManager()
    rp = rpm.getLongestRetentionPolicy(custid)
    pmm = mc.getPolicyMetaManager()

    policyStats = pmm.getPolicyStats(custid) 

    if not policyStats.isEmpty():
        print 'test failed because policy stats was not empty for customer',custid
        return False

    msgs = findMessages(mc,custid,numMessages)
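    # issue a purge request on every partition before restarting the pre-calc service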
        
    for p in mc.getPartitionManager().listPartitions():
        amsm.purgeRequest(p)

    preCalcSvc.invoke('start',purgeHost)

    # wait for the stats to get generated
    maxTries = 60
    while not areStatsGenerated(custid) and maxTries > 0:
        print 'Waiting for pre-calc service to generate stats...'
        time.sleep(10)
        maxTries = maxTries - 1

    if not areStatsGenerated(custid):
        print 'test failed because policy stats were not generated within time allotted'
        return False

    os.system('ssh -o StrictHostKeyChecking=no %s "bash /opt/ems/bin/policy_stats"' % purgeHost)

    policyStats = pmm.getPolicyStats(custid) 

    if policyStats.isEmpty():
        print 'test failed because policy stats was empty for customer',custid
        return False

    if not policyStats.containsKey(rp.getId()):
        print 'test failed because policy stats did not contain an entry for id',rp.getId()
        return False

    stat = policyStats.get(rp.getId())

    print 'users',stat.getNumUsers(),'size',stat.getMessageSize(),'count',stat.getMessageCount()
    
    if numMessages != stat.getMessageCount():
        print 'test failed because expected',numMessages,'but only found',stat.getMessageCount(),'in stats'
        return False

    return True
Example #24
def stopCache(cacheHost):
    cacheHandler = Service('voltdb')
    cacheHandler.invoke('stop',cacheHost)
    time.sleep(10)
    print time.asctime(),'Cache on host '+ cacheHost + ' has stopped'