Example 1
def Quit(msg):
    try:
        import log
        log.Quit(msg)
    except ImportError:
        import sys  # the log module is unavailable; fall back to plain stderr
        sys.stderr.write(msg + '\n')
        sys.exit(4)
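Example 1 shows the optional-dependency fallback pattern: prefer the project's `log` module and degrade to plain stderr when it cannot be imported. Below is a minimal self-contained sketch of the same idea; the `optional_logger` module name is hypothetical and stands in for any optional logging backend.

def quit_with_message(msg, exit_code=4):
    import sys
    # Prefer the richer logger when it is available; otherwise degrade
    # to stderr and exit with the given code.
    try:
        import optional_logger  # hypothetical optional dependency
        optional_logger.quit(msg)
    except ImportError:
        sys.stderr.write(msg + '\n')
        sys.exit(exit_code)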
Example 2
def _ImportDll():
    args = blue.pyos.GetArg()
    # __builtins__ is a module in __main__ but a dict in other modules,
    # so the 'boot' flag is checked both ways.
    hasboot = hasattr(__builtins__, 'boot') or 'boot' in __builtins__
    if hasboot and boot.role in (
            'server',
            'proxy') and '/jessica' not in args and '/minime' not in args:
        raise RuntimeError("Don't import trinity on the proxy or server")
    triPlatform = _GetPreferredPlatform()
    triType = DEFAULT_TRI_TYPE
    rightHanded = False
    for arg in args:
        arg = arg.lower()
        if arg.startswith('/triplatform'):
            s = arg.split('=')
            triPlatform = s[1]
        elif arg.startswith('/tritype'):
            s = arg.split('=')
            triType = s[1]
        elif arg == '/righthanded':
            rightHanded = True

    if triType not in VALID_TRI_TYPES:
        import log
        log.Quit('Invalid Trinity dll type')
    if triPlatform not in VALID_TRI_PLATFORMS:
        log.Quit('Invalid Trinity platform')
    dllName = '_trinity_%s_%s' % (triPlatform, triType)
    print 'Starting up Trinity through %s ...' % dllName
    RobustImport(dllName)
    if rightHanded:
        SetRightHanded(True)
        settings.SetValue('geometryResNormalizeOnLoad', True)
        print 'Trinity is using a right-handed coordinate system'
    if hasattr(blue.memoryTracker, 'd3dHeap1'):
        if GetD3DCreatedHeapCount() > 0:
            blue.memoryTracker.d3dHeap1 = GetD3DCreatedHeap(0)
        if GetD3DCreatedHeapCount() > 1:
            blue.memoryTracker.d3dHeap2 = GetD3DCreatedHeap(1)
    return triPlatform
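Both `_ImportDll` variants in this listing parse `/key=value` style switches by hand. The sketch below isolates that argument convention; the function name and the defaults dictionary are illustrative, not part of the original API.

def parse_slash_args(args, defaults):
    # Copy the defaults, then apply '/key=value' pairs and bare '/flag'
    # switches, mirroring the loop in _ImportDll above.
    options = dict(defaults)
    for arg in args:
        arg = arg.lower()
        if arg.startswith('/') and '=' in arg:
            key, _, value = arg[1:].partition('=')
            options[key] = value
        elif arg.startswith('/'):
            options[arg[1:]] = True
    return options

print parse_slash_args(['/triplatform=dx11', '/righthanded'],
                       {'triplatform': 'dx9', 'tritype': 'release'})
# prints the merged options, e.g. {'triplatform': 'dx11', 'righthanded': True, ...}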
Example 3
    def __init__(self, startInline=[]):
        log.LogMixin.__init__(self, 'svc.ServiceManager')
        self.state = service.SERVICE_START_PENDING
        self.services = {}
        self.dependants = {}
        self.notify = {}
        self.notifyObs = {}
        self.startInline = startInline
        self.blockedServices = []
        self.startupTimes = {}
        import __builtin__
        if hasattr(__builtin__, 'sm'):
            log.Quit(
                'Multiple instances of ServiceManager are not allowed in a process'
            )
        __builtin__.sm = self
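The interesting part of Example 3 is the singleton guard: the new instance publishes itself as the builtin `sm`, which makes it reachable from every module without an import and lets the constructor refuse a second instance. A minimal sketch of just that mechanism (the class name is illustrative):

import __builtin__

class SingletonService(object):
    def __init__(self):
        # Publishing the instance as a builtin makes it globally visible
        # and doubles as the per-process "already constructed" flag.
        if hasattr(__builtin__, 'sm'):
            raise RuntimeError(
                'Multiple instances of ServiceManager are not allowed in a process')
        __builtin__.sm = self

first = SingletonService()   # installs itself as the builtin 'sm'
# SingletonService()         # a second call would raise RuntimeError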
Example 4
def _ImportDll():
    """
    Imports the Trinity dll, selecting version to use based on command line arguments.
    Returns the platform selected.
    """
    triPlatform = _os.getenv('TRINITYPLATFORM', DEFAULT_TRI_PLATFORM)
    triType = _os.getenv('TRINITYTYPE', DEFAULT_TRI_TYPE)
    disablePlatformCheck = False
    for arg in _blue.pyos.GetArg():
        arg = arg.lower()
        if arg.startswith('/triplatform'):
            s = arg.split('=')
            triPlatform = s[1]
        elif arg.startswith('/tritype'):
            s = arg.split('=')
            triType = s[1]
        elif arg == '/no-platform-check':
            disablePlatformCheck = True

    if triType not in VALID_TRI_TYPES:
        import log
        log.Quit('Invalid Trinity dll type')
    if not disablePlatformCheck:
        if triPlatform.startswith('dx'):
            availablePlatforms.InstallDirectXIfNeeded()
        validPlatforms = availablePlatforms.GetAvailablePlatforms()
        if triPlatform not in validPlatforms:
            _logger.warn('Invalid Trinity platform %s' % triPlatform)
            triPlatform = validPlatforms[0]
            _logger.info('Using Trinity platform %s instead' % triPlatform)
    dllName = '_trinity_%s_%s' % (triPlatform, triType)
    print 'Starting up Trinity through %s ...' % dllName
    _RobustImport(dllName)
    if hasattr(_blue, 'memoryTracker') and hasattr(_blue.memoryTracker,
                                                   'd3dHeap1'):
        if GetD3DCreatedHeapCount() > 0:
            _blue.memoryTracker.d3dHeap1 = GetD3DCreatedHeap(0)
        if GetD3DCreatedHeapCount() > 1:
            _blue.memoryTracker.d3dHeap2 = GetD3DCreatedHeap(1)
    return triPlatform
Example 5
def RobustImport(moduleName, moduleNameForFallback=None):
    try:
        mod = __import__(moduleName, fromlist=['*'])
    except ImportError:
        import imp
        # A '_d.pyd' extension-module suffix means a debug interpreter
        # build, which needs the debug DirectX redistributable.
        if imp.get_suffixes()[0][0] == '_d.pyd':
            InstallSystemBinaries('DirectXRedistForDebug.exe')
        else:
            InstallSystemBinaries('DirectXRedist.exe')
        try:
            mod = __import__(moduleName, fromlist=['*'])
        except ImportError:
            if moduleNameForFallback:
                print 'Import failed on %s, falling back to %s ...' % (
                    moduleName, moduleNameForFallback)
                mod = __import__(moduleNameForFallback, fromlist=['*'])
            else:
                import log
                log.Quit('Failed to import trinity DLL')

    # Re-export every member of the imported DLL module from this module,
    # i.e. the runtime equivalent of 'from dllName import *'.
    for memberName in dir(mod):
        globals()[memberName] = getattr(mod, memberName)

    del mod
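`RobustImport` finishes with a runtime star-import: every attribute of the freshly imported module is copied into this module's globals. The sketch below shows the same trick against the standard `math` module; unlike the original it skips underscore names so that module metadata such as `__name__` is not clobbered.

def import_into_globals(moduleName, targetGlobals):
    # fromlist=['*'] forces __import__ to return the leaf module rather
    # than the top-level package.
    mod = __import__(moduleName, fromlist=['*'])
    for memberName in dir(mod):
        if not memberName.startswith('_'):
            targetGlobals[memberName] = getattr(mod, memberName)

import_into_globals('math', globals())
print sqrt(16.0)  # 4.0 -- sqrt now lives in this module's namespace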
Example 6
def _StoreGPUInfoInBreakpadHeaders():
    """
    Detect and save GPU information in Breakpad headers.
    This is extremely useful for graphics crashes and required by NVidia for them to look at driver crashes
    """
    try:
        adapterInfo = adapters.GetAdapterInfo(adapters.DEFAULT_ADAPTER)
        blue.SetCrashKeyValues(u'GPU_Description', adapterInfo.description)
        blue.SetCrashKeyValues(u'GPU_Driver', unicode(adapterInfo.driver))
        blue.SetCrashKeyValues(u'GPU_VendorId', unicode(adapterInfo.vendorID))
        blue.SetCrashKeyValues(u'GPU_DeviceId', unicode(adapterInfo.deviceID))
        blue.SetCrashKeyValues(u'trinityPlatform', unicode(platform))
        try:
            driverInfo = adapterInfo.GetDriverInfo()
            blue.SetCrashKeyValues(u'GPU_Driver_Version',
                                   unicode(driverInfo.driverVersionString))
            blue.SetCrashKeyValues(u'GPU_Driver_Date',
                                   unicode(driverInfo.driverDate))
            blue.SetCrashKeyValues(u'GPU_Driver_Vendor',
                                   unicode(driverInfo.driverVendor))
            blue.SetCrashKeyValues(u'GPU_Driver_Is_Optimus',
                                   u'Yes' if driverInfo.isOptimus else u'No')
            blue.SetCrashKeyValues(
                u'GPU_Driver_Is_Amd_Switchable',
                u'Yes' if driverInfo.isAmdDynamicSwitchable else u'No')
        except RuntimeError:
            blue.SetCrashKeyValues(u'GPU_Driver_Version',
                                   unicode(adapterInfo.driverVersion))

    except RuntimeError:
        pass
    except SystemError:
        if platform == 'dx11':
            import log
            log.Quit(
                'Video card may not support DX11 - setting preferred platform to DX9'
            )
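The nested try/except in Example 6 is best-effort metadata collection: the basic adapter fields are always recorded, and the detailed driver fields are added only when `GetDriverInfo()` succeeds. A self-contained sketch of that shape, using a stub adapter whose attribute names are taken from the snippet above:

class _FakeAdapter(object):
    # Stand-in for the adapter-info object; only the attributes used
    # below are modelled.
    description = 'Example GPU'
    driverVersion = '1.0'

    def GetDriverInfo(self):
        raise RuntimeError('detailed driver info unavailable')

def CollectGpuFields(adapter):
    # Always record the basics; upgrade to detailed driver info only
    # when the query succeeds, as in _StoreGPUInfoInBreakpadHeaders.
    fields = {'GPU_Description': adapter.description}
    try:
        info = adapter.GetDriverInfo()
        fields['GPU_Driver_Version'] = info.driverVersionString
    except RuntimeError:
        fields['GPU_Driver_Version'] = adapter.driverVersion
    return fields

print CollectGpuFields(_FakeAdapter())  # falls back to the basic version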
Example 7
    def SetDevice(self,
                  device,
                  tryAgain=1,
                  fallback=0,
                  keepSettings=1,
                  hideTitle=None,
                  userModified=False,
                  muteExceptions=False,
                  updateWindowPosition=True):
        if hideTitle is None:
            hideTitle = not device.Windowed
        self.LogInfo('SetDevice: tryAgain', tryAgain, 'fallback', fallback,
                     'keepSettings', keepSettings, 'hideTitle', hideTitle,
                     'deviceDict', device.__dict__)
        if not fallback:
            device = self.EnforceDeviceSettings(device)
        self.SanitizeDeviceTypes(device)
        change = self.CheckDeviceDifference(device, getChange=1)
        dev = trinity.device
        if not change and tryAgain and dev.DoesD3DDeviceExist():
            return True
        sm.ChainEvent('ProcessDeviceChange')
        pr = []
        for k, v in device.__dict__.items():
            pr.append((k, v))

        pr.sort()
        msg = 'SetDevice: Found a difference\n'
        for k, v in pr:
            extra = ''
            if k in change:
                extra = '   >> this one changed, it was ' + str(change[k][0])
            msg += '        ' + str(k) + ':    ' + str(v) + extra + '\n'

        self.LogInfo(msg)
        triapp = trinity.app
        if tryAgain:
            self.BackupSettings()
        try:
            triapp.hideTitle = hideTitle
            triapp.AdjustWindowForChange(
                device.Windowed,
                settings.public.device.Get('FixedWindow', False))
            msg = 'SetDevice - trying again\n'
            msg += 'Before:\n'
            msg += repr(device.__dict__) + '\n'
            if device.Adapter not in self.adapters:
                device.Adapter = self.adapters[0]
            device.__dict__.update(
                self.FixupPresentation(device.Adapter, device.__dict__))
            msg += 'After:\n'
            msg += repr(device.__dict__) + '\n'
            self.LogInfo(msg)
            dev = trinity.device
            dev.viewport.width = device.BackBufferWidth
            dev.viewport.height = device.BackBufferHeight
            while True:
                try:
                    triapp.ChangeDevice(device.Adapter, 0, 0, device.__dict__)
                    break
                except trinity.D3DERR_DEVICELOST:
                    blue.pyos.synchro.SleepWallclock(1000)

        except Exception as e:
            import traceback
            self.LogInfo(traceback.format_exc())
            self.LogInfo(repr(device.__dict__))
            # Rendering platform ID 2 corresponds to DX11 (inferred from
            # the DX11 messages in the branches below).
            if trinity.device.GetRenderingPlatformID() == 2:
                if prefs.HasKey('trinityPreferredPlatform') and prefs.GetValue(
                        'trinityPreferredPlatform') == 'dx11':
                    prefs.SetValue('trinityPreferredPlatform', 'dx9')
                    log.Quit(
                        'Failed to create device under DX11 - setting preferred platform to DX9'
                    )
                else:
                    log.Quit('Failed to create device under DX11')
            if tryAgain and self.settingsBackup:
                sys.exc_clear()
                self.LogInfo(
                    'SetDevice failed, trying again with backup settings')
                return self.SetDevice(self.settingsBackup,
                                      0,
                                      keepSettings=keepSettings)
            if not fallback:
                sys.exc_clear()
                self.LogInfo(
                    'SetDevice with backup settings failed, falling back to savemode'
                )
                set = self.GetSaveMode()
                return self.SetDevice(set,
                                      fallback=1,
                                      tryAgain=0,
                                      hideTitle=not set.Windowed,
                                      keepSettings=False)
            if muteExceptions:
                log.LogException()
                sys.exc_clear()
            self.LogInfo('SetDevice failed completely')
            return False

        if updateWindowPosition:
            self.UpdateWindowPosition(device)
        else:
            wr = triapp.GetWindowRect()
            triapp.SetWindowPos(wr.left, wr.top)
        sm.ScatterEvent('OnSetDevice')
        if uicore.desktop:
            uicore.desktop.UpdateSize()
        if keepSettings:
            set = self.GetSettings()
            keep = set.__dict__
            del keep['hDeviceWindow']
            gfxsettings.Set(gfxsettings.GFX_DEVICE_SETTINGS,
                            keep,
                            pending=False)
            self.settings.SaveSettings()
            self.LogInfo('Keeping device settings:', repr(keep))
            if self.IsWindowed(set):
                val = (set.BackBufferWidth, set.BackBufferHeight)
                gfxsettings.Set(gfxsettings.GFX_RESOLUTION_WINDOWED,
                                val,
                                pending=False)
            else:
                val = (set.BackBufferWidth, set.BackBufferHeight)
                gfxsettings.Set(gfxsettings.GFX_RESOLUTION_FULLSCREEN,
                                val,
                                pending=False)
                if userModified and self.resolutionBackup and self.resolutionBackup != val:
                    self.AskForConfirmation()
        sm.ScatterEvent('OnEndChangeDevice', change)
        unsupportedModels = ['SM_1_1', 'SM_2_0_LO', 'SM_2_0_HI']
        maxCardModel = trinity.GetMaxShaderModelSupported()
        if maxCardModel in unsupportedModels:
            message = localization.GetByLabel(
                '/Carbon/UI/Service/Device/ShaderModelNotSupportedMessage')
            title = localization.GetByLabel(
                '/Carbon/UI/Service/Device/ShaderModelNotSupportedTitle')
            blue.os.ShowErrorMessageBox(title, message)
            bluepy.Terminate('Shader Model version check failed')
        return True
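`SetDevice` recovers from failure by walking a ladder of progressively safer configurations: the requested settings, then the backed-up settings, then save mode. The recursion above implements that ladder; the sketch below restates it as a plain loop (all names are illustrative, not part of the original API).

def ApplyWithFallbacks(applySettings, candidates):
    # Try each candidate in order; the last entry plays the role of
    # SetDevice's save mode and is expected to always succeed.
    for candidate in candidates:
        try:
            applySettings(candidate)
            return candidate
        except Exception:
            continue
    return None

# Usage: ApplyWithFallbacks(activate, [requested, backup, saveMode])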
Example 8
def Startup(servicesToRun,
            startInline=[],
            serviceManagerClass='ServiceManager'):
    import blue
    args = blue.pyos.GetArg()[1:]
    autoexec_common.LogStarting('Server')
    import nasty
    additionalScriptDirs = [
        'script:/../../../carbon/backend/script/',
        'script:/../../backend/script/'
    ]
    telemetryStarted = False
    for argument in args:
        if argument.startswith('/telemetryServer='):
            tmServer = str(argument[17:])
            print 'Telemetry starting up on %s (from cmdline)' % tmServer
            blue.statistics.StartTelemetry(tmServer)
            blue.pyos.taskletTimer.telemetryOn = 1
            telemetryStarted = True

    if not telemetryStarted:
        telemetryServer = prefs.GetValue('startupTelemetryServer', None)
        if telemetryServer is not None:
            print 'Telemetry starting up on %s (from prefs)' % telemetryServer
            blue.statistics.StartTelemetry(telemetryServer)
            blue.pyos.taskletTimer.telemetryOn = 1
            telemetryStarted = True
    if '/jessica' in args:
        additionalScriptDirs.extend(
            ['script:/../../../carbon/tools/jessica/script/'])
        useExtensions = '/noJessicaExtensions' not in args
        if useExtensions:
            additionalScriptDirs.extend([
                'script:/../../../carbon/tools/jessicaExtensions/script/',
                'script:/../../tools/jessicaExtensions/script/'
            ])
    nasty.Startup(additionalScriptDirs)
    import util
    import types
    print 'Nasty was started @ -', strx(
        util.FmtTimeIntervalEng(blue.os.GetWallclockTimeNow() - nastyStarted))
    import localization
    localization.LoadLanguageData()
    t0 = blue.os.GetWallclockTimeNow()
    t00 = t0
    import gc
    gc.disable()
    autoexec_common.LogStarted('Server')
    for i in args:
        if len(i) > 0 and i[0] != '-' and i[0] != '/':
            print 'Executing', strx(i)
            blue.pyos.ExecFile(i, globals())

    import service
    smClass = getattr(service, serviceManagerClass)
    srvMng = smClass(startInline=['DB2', 'machoNet'] + startInline)
    log.general.Log('Startup:  Starting Core Services...', log.LGNOTICE)
    print 'Core Services Starting @', strx(
        util.FmtTimeIntervalEng(blue.os.GetWallclockTimeNow() - t0))
    startServices = ('machoNet', 'alert', 'objectCaching', 'debug')
    srvMng.Run(startServices)
    macho = sm.services['machoNet']
    DB2 = sm.services['DB2']
    dbzcluster = DB2.GetSchema('zcluster')
    dbzsystem = DB2.GetSchema('zsystem')
    while True:
        r = macho.AreTheseServicesRunning(startServices)
        if r is None:
            break
        log.general.Log('Startup:  Waiting for %s' % r, log.LGNOTICE)
        print 'Startup:  Waiting for', strx(r)
        blue.pyos.synchro.SleepWallclock(3000)

    print 'Database: ' + DB2.GetConnectionString()
    blue.os.SetAppTitle(
        '[%s %s.%s] %s %s %s.%s pid=%s' %
        (macho.GetNodeID(), macho.GetBasePortNumber(), boot.region.upper(),
         boot.codename, boot.role, boot.version, boot.build, blue.os.pid))
    startupInfo = dbzcluster.Cluster_StartupInfo()[0]
    dbClusterMode = startupInfo.clusterMode
    if dbClusterMode != prefs.clusterMode:
        s = 'DB / Server disagree on cluster mode.  Server says %s, but DB says %s' % (
            prefs.clusterMode, dbClusterMode)
        log.general.Log('###', log.LGERR)
        log.general.Log('### ' + s, log.LGERR)
        log.general.Log('###', log.LGERR)
        log.Quit(s)
    print '...'
    log.general.Log('Server Configuration:', log.LGNOTICE)
    log.general.Log(' NodeID: %s' % macho.GetNodeID(), log.LGNOTICE)
    log.general.Log(' NodeName: %s' % macho.GetNodeName(), log.LGNOTICE)
    log.general.Log(' Base Port: %s' % macho.GetBasePortNumber(), log.LGNOTICE)
    log.general.Log(' Region: %s' % boot.region.upper(), log.LGNOTICE)
    log.general.Log(' CodeName: %s' % boot.codename, log.LGNOTICE)
    log.general.Log(' Role: %s' % boot.role, log.LGNOTICE)
    log.general.Log(' Version: %s.%s' % (boot.version, boot.build),
                    log.LGNOTICE)
    log.general.Log(' ProcessID: %s' % blue.os.pid, log.LGNOTICE)
    log.general.Log(' clusterMode: %s' % startupInfo.clusterMode, log.LGNOTICE)
    log.general.Log(' Host: %s' % blue.pyos.GetEnv().get('COMPUTERNAME', '?'),
                    log.LGNOTICE)
    log.general.Log('Startup:  Synchronizing Clock with DB...', log.LGNOTICE)
    originalNow = blue.os.GetWallclockTimeNow()
    timediff = 0
    for i in xrange(20):
        now1 = blue.os.GetWallclockTimeNow()
        dbnow = dbzsystem.DateTime()[0].dateTime
        now2 = blue.os.GetWallclockTimeNow()
        now = (now1 + now2) / 2
        if abs(dbnow - now) < (i + 1) * const.SEC:
            break
        reason = 'DB / Server time horribly out of synch, DB says now=%s but server thinks now=%s' % (
            util.FmtDateEng(dbnow), util.FmtDateEng(now))
        log.general.Log(reason, 2)
        if prefs.clusterMode != 'LIVE':
            newnow = dbnow + (now2 - now1) / 2
            log.general.Log(
                'Resetting clock, setting time: ' + util.FmtDateEng(newnow),
                log.LGNOTICE)
            print 'Correcting clock to match DB ... advancing ', float(
                newnow - now2) / float(const.SEC), ' secs'
            t0 += newnow - now2
            blue.pyos.synchro.ResetClock(newnow)
        elif i < 10:
            log.general.Log('Retrying clock check to prevent node death',
                            log.LGERR)
            continue
        else:
            log.Quit(
                'DB / Server time horribly out of synch on live cluster.  Please perform a manual time synch.'
            )

    finalNow = blue.os.GetWallclockTimeNow()
    sm.ScatterEvent('OnTimeReset', originalNow, finalNow)
    nasty.nasty.OnTimeReset(originalNow, finalNow)
    log.general.Log(
        'Startup:  Marking myself as ready for connectivity establishment in zcluster.nodes',
        log.LGNOTICE)
    print 'Server Ready @', strx(
        util.FmtTimeIntervalEng(blue.os.GetWallclockTimeNow() - t0))
    dbzcluster.Nodes_SetStatus(macho.GetNodeID(), -4)
    macho.SetStatusKeyValuePair('clusterStatus', -400)
    macho.SetStatusKeyValuePair('clusterStatusText',
                                'Ready for connectivity establishment')
    log.general.Log('Startup:  Establishing Connectivity...', log.LGNOTICE)
    print 'Establishing Connectivity @', strx(
        util.FmtTimeIntervalEng(blue.os.GetWallclockTimeNow() - t0))
    if prefs.GetValue('skipConnectivity', 0):
        log.general.Log(
            'Startup:  skipConnectivity has been set, skipping connectivity tests...'
        )
    if not macho.IsResurrectedNode() and not prefs.GetValue(
            'skipConnectivity', 0):
        offsetClock = None
        if macho.GetNodeID() == macho.GetNodeFromAddress(
                const.cluster.SERVICE_POLARIS, 0):
            offsetClock = t0 - t00
        macho.readyToConnect = True
        startConnectivityCheck = blue.os.GetWallclockTime()
        while 1:
            try:
                log.general.Log('Startup:  Refreshing Connectivity...',
                                log.LGINFO)
                macho.RefreshConnectivity()
            except StandardError:
                log.general.Log('Startup:  Error refreshing connectivity...',
                                log.LGWARN)
                log.LogException(toConsole=1,
                                 toLogServer=1,
                                 severity=log.LGWARN)
                sys.exc_clear()

            blue.pyos.synchro.SleepWallclock(500)
            try:
                log.general.Log(
                    'Startup:  Performing cluster readiness test...',
                    log.LGINFO)
                now = blue.os.GetWallclockTimeNow()
                if now > startConnectivityCheck + LOG_CONNECTIVITY_ERROR_TIME:
                    log.general.Log(
                        'I have been checking the network connectivity for a long time.... Should we go and check out why?',
                        log.LGERR)
                    startConnectivityCheck = now
                ready = dbzcluster.Cluster_Ready(-4)
                if type(ready) == types.IntType:
                    if not ready:
                        print 'More Nodes Starting, pausing and retrying'
                        log.general.Log('Startup:  More nodes starting',
                                        log.LGNOTICE)
                        blue.pyos.synchro.SleepWallclock(5000)
                        continue
                elif ready[0].readyWhen is None:
                    log.Quit(
                        'The DB says that even I am not ready, but I have already passed my ready marker!  Shutting down.'
                    )
                else:
                    ready = ready[0]
                    until = ready.readyWhen + const.SEC * 70 * 5
                    if until > now:
                        s = min(70000 / 4,
                                (until - now) / const.SEC * 1000 + 1000)
                        log.general.Log(
                            'Startup:  Last startup was at %s.  Waiting %s before retrying, now=%s'
                            % (util.FmtDateEng(ready.readyWhen),
                               util.FmtDateEng(until), util.FmtDateEng(now)),
                            log.LGWARN)
                    else:
                        print 'Only %d nodes have registered themselves in zcluster.nodes and the safetytime has been passed by far...' % ready.nodeCount
                        log.general.Log(
                            'Startup:  Only %d nodes have registered in zcluster.nodes, but the safetytime has been passed by far...'
                            % ready.nodeCount, log.LGERR)
                        s = 70000
                    print 'Waiting ', s, ' millisecs prior to rechecking'
                    blue.pyos.synchro.SleepWallclock(s)
                    continue
                startupInfo = dbzcluster.Cluster_StartupInfo()[0]
                log.general.Log(
                    'Startup:  Connectivity test - calling all proxies...',
                    log.LGINFO)
                p = macho.session.ConnectToAllProxyServerServices(
                    'machoNet').ConnectivityTest(
                        offsetClock,
                        startupInfo.proxyNodeCount - 1,
                        startupInfo.serverNodeCount -
                        startupInfo.unexpectServerNodeCount,
                        uberMachoRaise=True)
                if len(p) != startupInfo.proxyNodeCount:
                    log.general.Log(
                        'Startup:  %d proxy nodes available, %d proxy nodes expected...'
                        % (len(p), startupInfo.proxyNodeCount), log.LGINFO)
                    print '%d proxy nodes available, %d proxy nodes expected...' % (
                        len(p), startupInfo.proxyNodeCount)
                    blue.pyos.synchro.SleepWallclock(500)
                    continue
                for isexception, nodeID, ret in p:
                    if not ret:
                        log.general.Log(
                            'Startup:  Proxy %d failed its connectivity test' %
                            nodeID, log.LGINFO)
                        raise UberMachoException(
                            'Proxy failed connectivity test')

                minimumProxyCount = prefs.GetValue(
                    'machoNet.minimumProxyCount', 0) or max(
                        1, int(startupInfo.proxyNodeCount * 0.8))
                if len(p) < minimumProxyCount:
                    print 'Too few proxy nodes succeeded in starting to make this run worthwhile'
                    log.general.Log(
                        'Too few proxy nodes succeeded in starting to make this run worthwhile',
                        log.LGERR)
                    log.general.Log(
                        'Minimum=%d, Actual=%d' % (minimumProxyCount, len(p)),
                        log.LGERR)
                    blue.pyos.synchro.SleepWallclock(10000)
                    continue
                log.general.Log(
                    'Startup:  Connectivity test - calling all sols...',
                    log.LGINFO)
                s = macho.session.ConnectToAllSolServerServices(
                    'machoNet').ConnectivityTest(
                        offsetClock,
                        startupInfo.proxyNodeCount,
                        startupInfo.serverNodeCount - 1 -
                        startupInfo.unexpectServerNodeCount,
                        uberMachoRaise=True)
                if len(
                        s
                ) != startupInfo.serverNodeCount - startupInfo.unexpectServerNodeCount:
                    log.general.Log(
                        'Startup:  %d server nodes available, %d server nodes expected...'
                        % (len(s), startupInfo.serverNodeCount), log.LGINFO)
                    print '%d server nodes available, %d server nodes expected...' % (
                        len(s), startupInfo.serverNodeCount)
                    blue.pyos.synchro.SleepWallclock(500)
                    continue
                for isexception, nodeID, ret in s:
                    if not ret:
                        log.general.Log(
                            'Startup:  Server %d failed its connectivity test'
                            % nodeID, log.LGINFO)
                        raise UberMachoException(
                            'Server failed connectivity test')

                minimumSolCount = prefs.GetValue(
                    'machoNet.minimumSolCount', 0) or max(
                        1, int(startupInfo.serverNodeCount * 0.8))
                if len(s) < minimumSolCount:
                    print 'Too few sol nodes succeeded in starting to make this run worthwhile'
                    log.general.Log(
                        'Too few sol nodes succeeded in starting to make this run worthwhile',
                        log.LGERR)
                    log.general.Log(
                        'Minimum=%d, Actual=%d' % (minimumSolCount, len(s)),
                        log.LGERR)
                    blue.pyos.synchro.SleepWallclock(10000)
                    continue
                break
            except UberMachoException as e:
                log.general.Log('Startup:  network connectivity not achieved',
                                log.LGINFO)
                sys.exc_clear()
            except UnMachoDestination as e:
                log.general.Log(
                    'Startup:  network connectivity not yet achieved (%s)' %
                    e.payload, 2)
                sys.exc_clear()
            except:
                log.LogException(
                    'Exception while pinging all proxies and sols')
                sys.exc_clear()

        print 'Connectivity achieved @', strx(
            util.FmtTimeIntervalEng(blue.os.GetWallclockTimeNow() - t0))
    if 'waitForPremapping' in startupInfo.__columns__ and int(
            startupInfo.waitForPremapping) == 1:
        polarisID = macho.GetNodeFromAddress(const.cluster.SERVICE_POLARIS, 0)
        while macho.GetNodeID() != polarisID and dbzcluster.Nodes_Status(
                polarisID) < -3:
            print 'Waiting for Polaris to reach premapping stage.. sleeping 5 seconds..'
            blue.pyos.synchro.SleepWallclock(5000)

    if prefs.clusterMode in ('TEST', 'LIVE'):
        if macho.GetNodeID() == macho.GetNodeFromAddress(
                const.cluster.SERVICE_POLARIS, 0):
            if not macho.IsResurrectedNode():
                dbzcluster.Cluster_PreMap()
    log.general.Log(
        'Startup:  Marking myself as ready for distributed startup phase in zcluster.nodes',
        log.LGNOTICE)
    dbzcluster.Nodes_SetStatus(macho.GetNodeID(), -3)
    macho.SetStatusKeyValuePair('clusterStatus', -300)
    macho.SetStatusKeyValuePair('clusterStatusText',
                                'Ready for distributed setup')
    log.general.Log('Startup:  Priming address cache...', log.LGNOTICE)
    while 1:
        try:
            if macho.IsResurrectedNode():
                print 'Priming address cache directly @', strx(
                    util.FmtTimeIntervalEng(blue.os.GetWallclockTimeNow() -
                                            t0))
                log.general.Log('Startup:  Priming address cache directly...',
                                log.LGINFO)
                macho.PrimeAddressCache(dbzcluster.Addresses_PrimeCache())
            elif macho.GetNodeID() == macho.GetNodeFromAddress(
                    const.cluster.SERVICE_POLARIS, 0):
                print 'Orchestrating priming of address cache @', strx(
                    util.FmtTimeIntervalEng(blue.os.GetWallclockTimeNow() -
                                            t0))
                log.general.Log(
                    'Startup:  Orchestrating priming of address cache...',
                    log.LGINFO)
                macho.session.ConnectToAllSolServerServices(
                    'machoNet').PrimeAddressCache(
                        dbzcluster.Addresses_PrimeCache(), uberMachoRaise=True)
            else:
                print 'Waiting for completion of priming of address cache @', strx(
                    util.FmtTimeIntervalEng(blue.os.GetWallclockTimeNow() -
                                            t0))
                if macho.GetNodeID() == macho.GetNodeFromAddress(
                        const.cluster.SERVICE_POLARIS, 0):
                    print "Polaris has died, and I have resumed it's responsibilities"
                    log.general.Log(
                        "Polaris has died, and I have resumed it's responsibilities",
                        log.LGERR)
                    continue
            break
        except:
            print 'Priming address cache failure, retrying...'
            log.LogException(
                'Exception while priming address cache.  I am%s Polaris' %
                [' NOT', ''][macho.GetNodeID() == macho.GetNodeFromAddress(
                    const.cluster.SERVICE_POLARIS, 0)])
            sys.exc_clear()

    log.general.Log(
        'Startup:  Marking myself as ready for starting user service phase in zcluster.nodes',
        log.LGNOTICE)
    dbzcluster.Nodes_SetStatus(macho.GetNodeID(), -2)
    macho.SetStatusKeyValuePair('clusterStatus', -200)
    macho.SetStatusKeyValuePair('clusterStatusText', 'Starting user services')
    log.general.Log('Startup:  Starting User Services...', log.LGNOTICE)
    print 'User Services Starting @', strx(
        util.FmtTimeIntervalEng(blue.os.GetWallclockTimeNow() - t0))
    blue.pyos.taskletTimer.Reset()
    sys.clearmemcontexts()
    blue.pyos.taskletTimer.active = 1
    srvMng.Run(servicesToRun)
    import ttimerutil
    timers = ttimerutil.TaskletSnapshot()
    blue.pyos.taskletTimer.active = 0
    sys.setmemcontextsactive(False)
    sys.clearmemcontexts()
    totalCPUTime = 0
    totalWallClockTime = 0
    for timer in timers:
        if timer.startswith('StartService::ServiceStartRun::') and timer.find(
                '^') == -1:
            serviceName = timer[31:]
            if serviceName in srvMng.startupTimes:
                serviceWallClockTime = srvMng.startupTimes[serviceName]
                totalWallClockTime += serviceWallClockTime
            else:
                serviceWallClockTime = -1
            serviceCPUTime = timers[timer].GetTime()
            totalCPUTime += serviceCPUTime
            log.general.Log(
                'Startup: Service %s took %.4fs wallclock and %.4fs cpu time to startup'
                % (serviceName, serviceWallClockTime, serviceCPUTime),
                log.LGNOTICE)

    log.general.Log(
        'Startup: Estimated serial execution time: %.4fs Total CPU time spent %.4fs '
        % (totalWallClockTime, totalCPUTime), log.LGNOTICE)
    log.general.Log(
        'Startup:  Marking myself as ready for cluster startup tests in zcluster.nodes',
        log.LGNOTICE)
    dbzcluster.Nodes_SetStatus(macho.GetNodeID(), -1)
    macho.SetStatusKeyValuePair('clusterStatus', -100)
    macho.SetStatusKeyValuePair('clusterStatusText', 'Startup test phase')
    if macho.IsResurrectedNode():
        loop = True
        while loop:
            loop = False
            try:
                ret = macho.AreTheseServicesRunning(startServices)
                if ret:
                    log.general.Log('Startup:  Waiting for %s' % ret,
                                    log.LGERR)
                    print 'Startup:  Waiting for %s' % strx(ret)
                    loop = True
            except:
                log.LogException(
                    'Exception while performing distributed startup.  I am%s Polaris'
                    %
                    [' NOT', ''][macho.GetNodeID() == macho.GetNodeFromAddress(
                        const.cluster.SERVICE_POLARIS, 0)])
                sys.exc_clear()
                log.Quit('Failed to determine service running state')

            if loop:
                blue.pyos.synchro.SleepWallclock(3000)

        macho.readyToConnect = True
        try:
            log.general.Log('Startup:  Refreshing Connectivity...',
                            log.LGNOTICE)
            macho.RefreshConnectivity()
        except StandardError:
            log.general.Log('Startup:  Error refreshing connectivity...',
                            log.LGWARN)
            log.LogException(toConsole=1, toLogServer=1, severity=log.LGWARN)
            sys.exc_clear()

        sm.ScatterEvent(
            'OnClusterStarting',
            macho.GetNodeFromAddress(const.cluster.SERVICE_POLARIS, 0))
    else:
        sent = False
        while not macho.clusterStartupPhase:
            if not sent and macho.GetNodeID() == macho.GetNodeFromAddress(
                    const.cluster.SERVICE_POLARIS, 0):
                print 'Cluster User Service Startup tests beginning @', strx(
                    util.FmtTimeIntervalEng(blue.os.GetWallclockTimeNow() -
                                            t0))
                loop = True
                while loop:
                    loop = False
                    r = macho.session.ConnectToAllSolServerServices(
                        'machoNet').AreTheseServicesRunning(
                            startServices, uberMachoRaise=True)
                    for isexception, nodeID, ret in r:
                        if ret:
                            log.general.Log(
                                'Startup:  Node %d waiting for %s' %
                                (nodeID, ret), log.LGERR)
                            print 'Startup:  Node %d waiting for %s' % (
                                nodeID, strx(ret))
                            loop = True
                            break

                    if loop:
                        blue.pyos.synchro.SleepWallclock(3000)
                    if len(r) < minimumSolCount:
                        print 'Too few sol nodes succeeded in starting to make this run worthwhile'
                        raise UberMachoException(
                            'Too few sol nodes succeeded in starting to make this run worthwhile'
                        )

                print 'Broadcasting OnClusterStarting'
                log.general.Log('Startup:  Broadcasting OnClusterStarting',
                                log.LGNOTICE)
                macho.ClusterBroadcast('OnClusterStarting', macho.GetNodeID())
                sent = True
                blue.pyos.synchro.SleepWallclock(1000)
            else:
                print 'Waiting for OnClusterStarting...'
                log.general.Log('Startup:  Waiting for OnClusterStarting...',
                                log.LGNOTICE)
                blue.pyos.synchro.SleepWallclock(3000)

    log.general.Log(
        "Startup:  Marking myself as ready to rock'n'roll in zcluster.nodes",
        log.LGNOTICE)
    dbzcluster.Nodes_SetStatus(macho.GetNodeID(), 0)
    macho.SetStatusKeyValuePair('clusterStatus', -50)
    macho.SetStatusKeyValuePair('clusterStatusText', 'Ready in DB')
    print '--------------------------------------------------------------'
    if macho.GetNodeID() == macho.GetNodeFromAddress(
            const.cluster.SERVICE_POLARIS, 0):
        print 'Polaris - Cluster ready @', strx(
            util.FmtTimeIntervalEng(blue.os.GetWallclockTimeNow() - t0))
    else:
        print 'Not Polaris - Server ready @', strx(
            util.FmtTimeIntervalEng(blue.os.GetWallclockTimeNow() - t0))
    ram = blue.win32.GetProcessMemoryInfo()['PagefileUsage'] / 1024 / 1024
    msg = 'Memory Usage (virtual mem) : %sMb upon startup' % ram
    macho.LogNotice(msg)
    print msg
    sm.ScatterEvent('OnServerStartupCompleted')
    macho.SetStatusKeyValuePair('clusterStatus', 0)
    macho.SetStatusKeyValuePair('clusterStatusText', 'Ready')
    if bluepy.IsRunningStartupTest():
        bluepy.TerminateStartupTest()
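The clock-synchronization loop in Example 8 samples the local clock immediately before and after the DB time query and compares the DB time against the midpoint `(now1 + now2) / 2`, so that symmetric query latency cancels out; the acceptance tolerance `(i + 1) * const.SEC` widens on every retry. A self-contained sketch of the midpoint estimate, using `time.time` for the local clock:

import time

def ClockOffset(readRemoteTime, readLocalTime=time.time):
    # Sample the local clock around the remote read and compare the
    # remote timestamp against the midpoint, so that symmetric
    # network/query latency roughly cancels out.
    t1 = readLocalTime()
    remote = readRemoteTime()
    t2 = readLocalTime()
    return remote - (t1 + t2) / 2.0

# With a remote clock running exactly 5 seconds ahead:
print ClockOffset(lambda: time.time() + 5.0)  # ~5.0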
Example 9
adapters = blue.classes.CreateInstance('trinity.Tr2VideoAdapters')
try:
    adapterInfo = adapters.GetAdapterInfo(adapters.DEFAULT_ADAPTER)
    blue.SetCrashKeyValues(u'GPU_Description', adapterInfo.description)
    blue.SetCrashKeyValues(u'GPU_Driver', unicode(adapterInfo.driver))
    blue.SetCrashKeyValues(u'GPU_Driver_Version',
                           unicode(adapterInfo.driverVersion))
    blue.SetCrashKeyValues(u'GPU_VendorId', unicode(adapterInfo.vendorID))
    blue.SetCrashKeyValues(u'GPU_DeviceId', unicode(adapterInfo.deviceID))
except RuntimeError:
    pass
except SystemError:
    if platform == 'dx11':
        _SetPreferredPlatform('dx9')
        log.Quit(
            'Video card may not support DX11 - setting preferred platform to DX9'
        )

device = blue.classes.CreateInstance('trinity.TriDevice')
renderContext = device.GetRenderContext()
app = blue.classes.CreateInstance('triui.App')
if hasattr(blue, 'CcpStatistics'):
    statistics = blue.CcpStatistics()
from trinity.renderJob import CreateRenderJob
from trinity.renderJobUtils import *

renderJobs = trinity.renderJob.RenderJobs()
device.SetRenderJobs(renderJobs)


def IsFpsEnabled():