Example 1
    def setup(self, cfg):

        BasePlugin.setup(self, cfg)

        self.data_root = os.path.join(os.path.dirname(__file__), "data", "web")

        app_cfg = self.service.cfg.get("app")
        mongo_server = app_cfg.get("general").get("mongo_server", "localhost")
        mongo_port = app_cfg.get("general").get("mongo_port", 27017)

        self.app = Flask(__name__,
                         template_folder=os.path.join(self.data_root,
                                                      "templates"))

        static = Static.File(os.path.join(self.data_root, "static"))
        static.processors = {
            ".py": script.PythonScript,
            ".rpy": script.ResourceScript
        }
        static.indexNames = ["index.rpy", "index.html", "index.htm"]

        root = Root(self)
        root.putChild("static", static)

        site = WebSocketSite(root)
        WebSocketProtocol.plugin = self
        site.addHandler("/ws", WebSocketProtocol)

        self.service.reactor.listenTCP(self.cfg.get("port", 8901), site)

        @self.app.route("/")
        def index():
            return redirect("/admin/")

        @self.app.route("/reconfigure/")
        def reconfigure():
            self.publish(["c:mhub", "i:reconfigure"])
            return redirect("/")

        @self.app.route("/admin/")
        def admin():
            ctx = self.context_processor()
            return render_template("admin/home.html", **ctx)

        @self.app.route("/admin/db/")
        def admin_db():
            return redirect("/admin/db/list/")

        @self.app.route("/admin/db/list/")
        def admin_db_list():

            ctx = self.context_processor()

            store_items = self.service.db_find("store", {})
            ctx["store_items"] = list(store_items)

            return render_template("admin/db/list.html", **ctx)

        @self.app.route("/admin/db/edit/<item_id>", methods=["GET", "POST"])
        def admin_db_edit(item_id):

            ctx = self.context_processor()
            item = self.service.db_find_one("store", item_id)
            form = _build_db_form(item)
            # populate_obj writes form data onto an object; to load the
            # submitted values into the form, process() is the right call.
            form.process(request.form)
            ctx["item"] = item
            ctx["form"] = form

            if request.method == "POST" and form.validate():
                return redirect("/admin/db/list/")

            return render_template("admin/db/edit.html", **ctx)

        def _build_db_form(item):

            fields = dict()

            for k, v in item.iteritems():
                fields[k] = TextField()

            form = BaseForm(fields=fields)

            return form
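Note: the setup above mixes Flask routing with Twisted's static serving; the static.File features it relies on (the processors map for .rpy scripts and the indexNames lookup order) are standard twisted.web APIs. A minimal self-contained sketch of just that part, with a hypothetical document root:

import os

from twisted.internet import reactor
from twisted.web import server, static, script

# Hypothetical document root; swap in your own directory.
static_root = static.File(os.path.join(os.path.dirname(__file__), "static"))
static_root.processors = {".rpy": script.ResourceScript}  # run .rpy files as resources
static_root.indexNames = ["index.rpy", "index.html", "index.htm"]

reactor.listenTCP(8901, server.Site(static_root))
reactor.run()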
Example 2
    def getResourceFor(self, request):
        uri = request.uri
        uri = uri.split("?", 1)[0]
        uri = uri.split("#", 1)[0]
        if uri.startswith('/json') or uri.startswith('/dump'):
            request.prepath = ['app']
            result = resource.getChildForRequest(self.resource, request)
        elif uri.startswith('/payment'):
            request.prepath = ['payment']
            result = resource.getChildForRequest(self.resource, request)
        elif uri == '/favicon.ico':
            return
        else:
            pathParts = uri.split('/')
            version = pathParts[1]

            if pathParts[2].startswith('index.'):
                print("-> index")
                contentType = 'text/html'
                absoluteFilePath = os.path.join(projectTargetDir(), 'dev',
                                                version, pathParts[2])
                # print("INDEX.HTML absolute path " + str(absoluteFilePath))
                result = static.File(absoluteFilePath, contentType)
            elif pathParts[2].endswith('.webapp'):
                print("-> webapp")
                contentType = 'application/x-web-app-manifest+json'
                absoluteFilePath = os.path.join(projectBaseDir(), 'frontend',
                                                version, 'properties',
                                                pathParts[2])
                result = static.File(absoluteFilePath, contentType)
            # elif pathParts[2].endswith('.appcache'):
            elif pathParts[2].endswith('.appcache_disabled'):
                print("-> appcache")
                contentType = 'text/cache-manifest'
                absoluteFilePath = os.path.join(projectBaseDir(), 'frontend',
                                                version, 'properties',
                                                pathParts[2])
                fileContent = codecs.open(absoluteFilePath, 'r',
                                          'utf-8').read()
                # fileContent = fileContent.replace('@application.version@', str(uuid.uuid1()))
                fileContent = fileContent.replace('@application.version@',
                                                  str(round(time.time())))
                result = static.Data(str(fileContent), contentType)
            else:
                # http://homer.local:8888/beta/css/clipperz/images/loginInfoBackground.png
                # pathParts: ['', 'beta', 'css', 'clipperz', 'images', 'loginInfoBackground.png']
                try:
                    imagePathIndex = pathParts.index('images')
                    resourceType = 'images'
                    for _ in range(2, imagePathIndex):
                        del pathParts[2]
                except ValueError:  # no 'images' segment in the path
                    resourceType = pathParts[2]

                basePath = projectBaseDir() + '/frontend'
                if resourceType == 'images':
                    fileExtension = os.path.splitext(uri)[1]
                    if fileExtension == '.png':
                        # print("-> image - png")
                        contentType = 'image/png'
                    elif fileExtension == '.jpg':
                        # print("-> image - jpg")
                        contentType = 'image/jpeg'
                    elif fileExtension == '.gif':
                        # print("-> image - gif")
                        contentType = 'image/gif'
                    else:
                        print("ERROR - unknown image extension: " + fileExtension)
                        # Fall back to a generic type so static.File below still
                        # receives a defined contentType.
                        contentType = 'application/octet-stream'

                    absoluteFilePath = basePath + '/'.join(pathParts)
                else:
                    if resourceType == 'css':
                        # print("-> css")
                        contentType = 'text/css'
                    elif resourceType == 'js':
                        # print("-> js")
                        contentType = 'text/javascript'
                    else:
                        # print("-> text/html")
                        contentType = 'text/html'

                    absoluteFilePath = basePath + uri

                result = static.File(absoluteFilePath, contentType)


        # print("RESULT\n" + str(result))
        return result
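Note: this dispatcher overrides twisted.web.server.Site.getResourceFor, which normally walks the resource tree; a reduced sketch of the same hook, assuming a hypothetical frontend directory and falling back to the default traversal:

from twisted.web import resource, server, static

class DispatchingSite(server.Site):
    def getResourceFor(self, request):
        uri = request.uri.split("?", 1)[0].split("#", 1)[0]
        if uri.startswith('/static/'):
            # Map the URI straight onto the filesystem, as above.
            return static.File('frontend' + uri)
        # Anything else goes through the normal resource traversal.
        return server.Site.getResourceFor(self, request)

site = DispatchingSite(resource.Resource())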
Example 3
def makeService(options):
    # primary setup
    application = service.Application(meta.display_name)
    services = service.IServiceCollection(application)

    # setup message server
    serverFactory = ServerFactory()
    serverFactory.protocol = Listener
    serverFactory.publisher = PublisherFactory()

    msgServer = internet.TCPServer(config.listener.port, serverFactory)
    msgServer.setName(config.listener.servicename)
    msgServer.setServiceParent(services)

    # setup IRC message client
    if config.irc.sslEnabled:
        msgService = internet.SSLClient(config.irc.server, config.irc.port,
                                        serverFactory.publisher,
                                        ClientContextFactory())
    else:
        msgService = internet.TCPClient(config.irc.server, config.irc.port,
                                        serverFactory.publisher)
    msgService.setName(config.irc.servicename)
    msgService.setServiceParent(services)

    # setup IRC log client
    logger = LoggerFactory(config.irc.server, config.log.channels)
    logService = internet.TCPClient(config.irc.server, config.irc.port, logger)
    logService.setName(config.log.servicename)
    logService.setServiceParent(services)

    # setup log rotator
    rotService = internet.TimerService(config.log.rotate.checkInterval,
                                       logger.rotateLogs, logService)
    rotService.setName(config.log.rotate.servicename)
    rotService.setServiceParent(services)

    # setup log file web server
    webroot = static.File(config.log.http.docRoot)
    if config.log.http.vhostEnabled:
        vResource = vhost.VHostMonsterResource()
        webroot.putChild('vhost', vResource)
    if config.log.http.auth == 'basic':
        guarded = auth.guardResourceWithBasicAuth(webroot,
                                                  config.log.http.realm,
                                                  config.log.http.users)
        site = server.Site(guarded)
    else:
        site = server.Site(webroot)
    webserver = internet.TCPServer(config.log.http.port, site)
    webserver.setName(config.log.http.servicename)
    webserver.setServiceParent(services)

    # setup ssh access to a Python shell
    interpreterType = dreamssh_const.PYTHON
    sshFactory = getShellFactory(interpreterType,
                                 app=application,
                                 services=services)
    sshserver = internet.TCPServer(config.ssh.port, sshFactory)
    sshserver.setName(config.ssh.servicename)
    sshserver.setServiceParent(services)
    return services
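Note: for reference, the bare skeleton of such a twistd makeService, cut down to the static-file web service alone (service name, port and docroot are placeholders):

from twisted.application import internet, service
from twisted.web import server, static

def makeService(options):
    application = service.Application("example")        # placeholder name
    services = service.IServiceCollection(application)

    site = server.Site(static.File("/srv/logs"))        # placeholder docroot
    webserver = internet.TCPServer(8080, site)          # placeholder port
    webserver.setName("logweb")
    webserver.setServiceParent(services)
    return services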
Example 4
    def makeService(self, options):
        """
        Construct a TCPServer from a factory defined in myproject.
        """
        observer = log.FileLogObserver(sys.stderr)
        log.addObserver(observer.emit)

        from fmspy.config import config

        if options['rtmp-port'] != 1935:
            config.set('RTMP', 'port', options['rtmp-port'])
        if options['rtmp-interface'] != '':
            config.set('RTMP', 'interface', options['rtmp-interface'])

        from twisted.application import internet, service
        from fmspy.rtmp.protocol import RTMPServerFactory

        s = service.MultiService()

        h = internet.TCPServer(config.getint('RTMP', 'port'),
                               RTMPServerFactory(),
                               config.getint('RTMP', 'backlog'),
                               config.get('RTMP', 'interface'))
        h.setServiceParent(s)

        log.msg('RTMP server at port %d.' % config.getint('RTMP', 'port'))

        from fmspy.application import app_factory

        def appsLoaded(_):
            log.msg("Applications loaded.")

        def appsError(fail):
            log.err(fail, "Applications failed to load.")

        app_factory.load_applications().addCallbacks(appsLoaded, appsError)

        if config.getboolean('HTTP', 'enabled'):
            from twisted.web import server, static, resource

            root = resource.Resource()

            if config.getboolean('HTTP', 'examples-enabled'):
                examples_path = 'examples/'

                try:
                    from pkg_resources import Requirement, resource_filename, DistributionNotFound

                    try:
                        examples_path = resource_filename(
                            Requirement.parse("fmspy"), "share/examples")
                    except DistributionNotFound:
                        pass

                except ImportError:
                    pass

                root.putChild('examples', static.File(examples_path))

            # Start the HTTP site whenever HTTP is enabled, not only when
            # the examples are.
            h = internet.TCPServer(config.getint('HTTP', 'port'),
                                   server.Site(root))
            h.setServiceParent(s)

            log.msg('HTTP server at port %d.' %
                    config.getint('HTTP', 'port'))

        log.removeObserver(observer.emit)

        return s
Example 5
def main():
    try:
        import twisted
    except ImportError:
        print "Orbited requires Twisted, which is not installed. See http://twistedmatrix.com/trac/ for installation instructions."
        sys.exit(1)
    import platform
    if platform.system() == "Windows":
        try:
            import win32api
        except ImportError:
            print "Orbited for Windows requires the Python for Windows Extensions, which are not installed. See http://python.net/crew/mhammond/win32/ for installation instructions."
            sys.exit(1)
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option("-c",
                      "--config",
                      dest="config",
                      default=None,
                      help="path to configuration file")
    parser.add_option("-v",
                      "--version",
                      dest="version",
                      action="store_true",
                      default=False,
                      help="print Orbited version")
    parser.add_option("-p",
                      "--profile",
                      dest="profile",
                      action="store_true",
                      default=False,
                      help="run Orbited with a profiler")
    parser.add_option(
        "-q",
        "--quickstart",
        dest="quickstart",
        action="store_true",
        default=False,
        help="run Orbited on port 8000 and MorbidQ on port 61613")

    (options, args) = parser.parse_args()

    if args:
        print 'the "orbited" command does not accept positional arguments. type "orbited -h" for options.'
        sys.exit(1)

    if options.version:
        print "Orbited version: %s" % (version, )
        sys.exit(0)

    if options.quickstart:
        config.map['[listen]'].append('http://:8000')
        config.map['[listen]'].append('stomp://:61613')
        config.map['[access]'][('localhost', 61613)] = ['*']
        print "Quickstarting Orbited"
    else:
        # load configuration from the configuration
        # file and from command-line arguments.
        config.setup(options=options)

    logging.setup(config.map)

    # we can now safely get loggers.
    global logger
    logger = logging.get_logger('orbited.start')

    # NB: we need to install the reactor before using twisted.
    reactor_name = config.map['[global]'].get('reactor')
    if reactor_name:
        install = _import('twisted.internet.%sreactor.install' % reactor_name)
        install()
        logger.info('using %s reactor' % reactor_name)

    ############
    # This crude garbage corrects a bug in twisted
    #   Orbited ticket: http://orbited.org/ticket/111
    #   Twisted ticket: http://twistedmatrix.com/trac/ticket/2447
    import twisted.web.http
    twisted.web.http.HTTPChannel.setTimeout = lambda self, arg: None
    twisted.web.http.HTTPChannel.resetTimeout = lambda self: None
    ############

    from twisted.internet import reactor
    from twisted.web import resource
    from twisted.web import server
    from twisted.web import static
    import orbited.system

    root = resource.Resource()
    static_files = static.File(
        os.path.join(os.path.dirname(__file__), 'static'))
    root.putChild('static', static_files)
    root.putChild('system', orbited.system.SystemResource())

    if config.map['[test]']['stompdispatcher.enabled'] == '1':
        logger.info('stompdispatcher enabled')

    #static_files.putChild('orbited.swf', static.File(os.path.join(os.path.dirname(__file__), 'flash', 'orbited.swf')))
    site = server.Site(root)

    _setup_protocols(root)
    _setup_static(root, config.map)
    start_listening(site, config.map, logger)

    # switch uid and gid to configured user and group.
    if os.name == 'posix' and os.getuid() == 0:
        user = config.map['[global]'].get('user')
        group = config.map['[global]'].get('group')
        if user:
            import pwd
            import grp
            try:
                pw = pwd.getpwnam(user)
                uid = pw.pw_uid
                if group:
                    gr = grp.getgrnam(group)
                    gid = gr.gr_gid
                else:
                    gid = pw.pw_gid
                    gr = grp.getgrgid(gid)
                    group = gr.gr_name
            except Exception, e:
                logger.error('Aborting; Unknown user or group: %s' % e)
                sys.exit(1)
            logger.info('switching to user %s (uid=%d) and group %s (gid=%d)' %
                        (user, uid, group, gid))
            os.setgid(gid)
            os.setuid(uid)
        else:
            logger.error(
                'Aborting; You must define a user (and optionally a group) in the configuration file.'
            )
            sys.exit(1)
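Note: the reactor selection above installs the configured reactor by dotted name before anything imports twisted.internet.reactor; the static form of the same idiom, using the poll reactor as an arbitrary example:

# Must happen before the first `from twisted.internet import reactor`.
from twisted.internet import pollreactor
pollreactor.install()

from twisted.internet import reactor  # now the poll-based reactor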
Example 6
import argparse
import logging
import sys

from twisted.internet import reactor
from twisted.web import resource, server, static

_log = logging.getLogger(__name__)


class ServerControl(resource.Resource):
    isLeaf = True

    def render_GET(self, request):
        _log.info("Serving request %s" % request)
        reactor.stop()
        return ""

    def render_POST(self, request):
        _log.info("Serving request %s" % request)
        sys.stdout.write(request.content.read())
        sys.stdout.flush()
        reactor.stop()
        return 'OK'


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='python twisted_http_server.py web_root')
    parser.add_argument('web_root')
    parser.add_argument('--port', type=int, default=0)
    parser.add_argument('--interface', default='')
    args = parser.parse_args()
    web_root = static.File(args.web_root)
    serverControl = ServerControl()
    web_root.putChild('shutdown', serverControl)
    web_root.putChild('report', serverControl)
    reactor.listenTCP(args.port,
                      server.Site(web_root),
                      interface=args.interface)
    reactor.run()
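Note: assuming the methods above belong to the ServerControl resource the __main__ block instantiates, the endpoints can be driven from a second process; a hypothetical Python 2 client:

import urllib2  # Python 2, matching the example

# Hypothetical port; with --port 0 the OS picks an ephemeral one instead.
urllib2.urlopen("http://localhost:8000/report", data="test output").read()
# The server writes the POST body to stdout and stops its reactor, so a
# later /shutdown request would find nothing listening.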
Example 7
from twisted.web import vhost, static, script

default = static.Data('', 'text/html')  # static.Data takes (data, type)
default.putChild('vhost', vhost.VHostMonsterResource())
resource = vhost.NameVirtualHost()
resource.default = default
files = static.File('static')
files.processors = {'.rpy': script.ResourceScript}
resource.addHost('twistedmatrix.com', files)
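Note: the snippet reads like an .rpy fragment, where the module-level `resource` is picked up by the surrounding twisted.web machinery; a hedged sketch of serving it standalone instead:

from twisted.internet import reactor
from twisted.web import server

reactor.listenTCP(8080, server.Site(resource))  # placeholder port
reactor.run()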
Example 8
    @exported_value(type=Timestamp())
    def get_last_heard_time(self):
        return self.__last_heard_time

    @exported_value(type=unicode)
    def get_call(self):
        return self.__call

    @exported_value(type=int)
    def get_ident(self):
        return self.__ident

    @exported_value(type=unicode)
    def get_aircraft_type(self):
        return self.__aircraft_type

    @exported_value(type=Track)
    def get_track(self):
        return self.__track


plugin_mode = ModeDef(mode='MODE-S',
                      label='Mode S',
                      demod_class=ModeSDemodulator,
                      available=_available)
plugin_client = ClientResourceDef(
    key=__name__,
    resource=static.File(os.path.join(os.path.split(__file__)[0], 'client')),
    load_js_path='mode_s.js')
Example 9
    def wchild_preview(self, request):
        return static.File(self.image)
Example 10
            else:
                del dic[key]
        linesXpath = removeDuplicates(linesXpath)
        tmplate = setXSLTemplate(linesXpath, dic)
        global next_button_xpath
        crawlData(pageUrl, tmplate, 'CLICK', ''.join(next_button_xpath))
        return 'success'


class nextButton(Resource):
    def render_GET(self, request):
        global next_button_xpath
        global browser
        next_button_xpath = request.args['nextButtonXpath']
        print next_button_xpath

        return 'success'


resource = static.File('../web')
resource.putChild('proxy', ProxyResource())
resource.putChild('nextButton', nextButton())
resource.putChild('page', PageResource())
resource.putChild('selected', selectedData())
resource.putChild('crawler.html', File("../web/crawler.html"))

reactor.listenTCP(PORT, server.Site(resource))
reactor.run()
Example 11
    EVENNIA.services.addService(amp_service)

if WEBSERVER_ENABLED:

    # Start a django-compatible webserver.

    from twisted.python import threadpool
    from evennia.server.webserver import DjangoWebRoot, WSGIWebServer, Website

    # start a thread pool and define the root url (/) as a wsgi resource
    # recognized by Django
    threads = threadpool.ThreadPool(minthreads=max(1, settings.WEBSERVER_THREADPOOL_LIMITS[0]),
                                    maxthreads=max(1, settings.WEBSERVER_THREADPOOL_LIMITS[1]))
    web_root = DjangoWebRoot(threads)
    # point our media resources to url /media
    web_root.putChild("media", static.File(settings.MEDIA_ROOT))
    # point our static resources to url /static
    web_root.putChild("static", static.File(settings.STATIC_ROOT))

    if WEB_PLUGINS_MODULE:
        # custom overloads
        web_root = WEB_PLUGINS_MODULE.at_webserver_root_creation(web_root)

    web_site = Website(web_root, logPath=settings.HTTP_LOG_FILE)

    for proxyport, serverport in WEBSERVER_PORTS:
        # create the webserver (we only need the port for this)
        webserver = WSGIWebServer(threads, serverport, web_site, interface='127.0.0.1')
        webserver.setName('EvenniaWebServer%s' % serverport)
        EVENNIA.services.addService(webserver)
Example 12
def get_web_root(tracker, bitcoind_work, get_current_txouts, datadir_path, net, get_stale_counts, my_pubkey_hash, local_rate_monitor, worker_fee, p2p_node, my_share_hashes, pseudoshare_received, share_received, best_share_var, bitcoin_warning_var, traffic_happened):
    start_time = time.time()
    
    web_root = resource.Resource()
    
    def get_users():
        height, last = tracker.get_height_and_last(best_share_var.value)
        weights, total_weight, donation_weight = tracker.get_cumulative_weights(best_share_var.value, min(height, 720), 65535*2**256)
        res = {}
        for script in sorted(weights, key=lambda s: weights[s]):
            res[bitcoin_data.script2_to_address(script, net.PARENT)] = weights[script]/total_weight
        return res
    
    def get_current_scaled_txouts(scale, trunc=0):
        txouts = get_current_txouts()
        total = sum(txouts.itervalues())
        results = dict((script, value*scale//total) for script, value in txouts.iteritems())
        if trunc > 0:
            total_random = 0
            random_set = set()
            for s in sorted(results, key=results.__getitem__):
                if results[s] >= trunc:
                    break
                total_random += results[s]
                random_set.add(s)
            if total_random:
                winner = math.weighted_choice((script, results[script]) for script in random_set)
                for script in random_set:
                    del results[script]
                results[winner] = total_random
        if sum(results.itervalues()) < int(scale):
            results[math.weighted_choice(results.iteritems())] += int(scale) - sum(results.itervalues())
        return results
    
    def get_patron_sendmany(total=None, trunc='0.01'):
        if total is None:
            return 'need total argument. go to patron_sendmany/<TOTAL>'
        total = int(float(total)*1e8)
        trunc = int(float(trunc)*1e8)
        return json.dumps(dict(
            (bitcoin_data.script2_to_address(script, net.PARENT), value/1e8)
            for script, value in get_current_scaled_txouts(total, trunc).iteritems()
            if bitcoin_data.script2_to_address(script, net.PARENT) is not None
        ))
    
    def get_local_rates():
        miner_hash_rates = {}
        miner_dead_hash_rates = {}
        datums, dt = local_rate_monitor.get_datums_in_last()
        for datum in datums:
            miner_hash_rates[datum['user']] = miner_hash_rates.get(datum['user'], 0) + datum['work']/dt
            if datum['dead']:
                miner_dead_hash_rates[datum['user']] = miner_dead_hash_rates.get(datum['user'], 0) + datum['work']/dt
        return miner_hash_rates, miner_dead_hash_rates
    
    def get_global_stats():
        # averaged over last hour
        lookbehind = 3600//net.SHARE_PERIOD
        if tracker.get_height(best_share_var.value) < lookbehind:
            return None
        
        nonstale_hash_rate = p2pool_data.get_pool_attempts_per_second(tracker, best_share_var.value, lookbehind)
        stale_prop = p2pool_data.get_average_stale_prop(tracker, best_share_var.value, lookbehind)
        return dict(
            pool_nonstale_hash_rate=nonstale_hash_rate,
            pool_hash_rate=nonstale_hash_rate/(1 - stale_prop),
            pool_stale_prop=stale_prop,
            min_difficulty=bitcoin_data.target_to_difficulty(tracker.items[best_share_var.value].max_target),
        )
    
    def get_local_stats():
        lookbehind = 3600//net.SHARE_PERIOD
        if tracker.get_height(best_share_var.value) < lookbehind:
            return None
        
        global_stale_prop = p2pool_data.get_average_stale_prop(tracker, best_share_var.value, lookbehind)
        
        my_unstale_count = sum(1 for share in tracker.get_chain(best_share_var.value, lookbehind) if share.hash in my_share_hashes)
        my_orphan_count = sum(1 for share in tracker.get_chain(best_share_var.value, lookbehind) if share.hash in my_share_hashes and share.share_data['stale_info'] == 'orphan')
        my_doa_count = sum(1 for share in tracker.get_chain(best_share_var.value, lookbehind) if share.hash in my_share_hashes and share.share_data['stale_info'] == 'doa')
        my_share_count = my_unstale_count + my_orphan_count + my_doa_count
        my_stale_count = my_orphan_count + my_doa_count
        
        my_stale_prop = my_stale_count/my_share_count if my_share_count != 0 else None
        
        my_work = sum(bitcoin_data.target_to_average_attempts(share.target)
            for share in tracker.get_chain(best_share_var.value, lookbehind - 1)
            if share.hash in my_share_hashes)
        actual_time = (tracker.items[best_share_var.value].timestamp -
            tracker.items[tracker.get_nth_parent_hash(best_share_var.value, lookbehind - 1)].timestamp)
        share_att_s = my_work / actual_time
        
        miner_hash_rates, miner_dead_hash_rates = get_local_rates()
        (stale_orphan_shares, stale_doa_shares), shares, _ = get_stale_counts()
        
        return dict(
            my_hash_rates_in_last_hour=dict(
                note="DEPRECATED",
                nonstale=share_att_s,
                rewarded=share_att_s/(1 - global_stale_prop),
                actual=share_att_s/(1 - my_stale_prop) if my_stale_prop is not None else 0, # 0 because we don't have any shares anyway
            ),
            my_share_counts_in_last_hour=dict(
                shares=my_share_count,
                unstale_shares=my_unstale_count,
                stale_shares=my_stale_count,
                orphan_stale_shares=my_orphan_count,
                doa_stale_shares=my_doa_count,
            ),
            my_stale_proportions_in_last_hour=dict(
                stale=my_stale_prop,
                orphan_stale=my_orphan_count/my_share_count if my_share_count != 0 else None,
                dead_stale=my_doa_count/my_share_count if my_share_count != 0 else None,
            ),
            miner_hash_rates=miner_hash_rates,
            miner_dead_hash_rates=miner_dead_hash_rates,
            efficiency_if_miner_perfect=(1 - stale_orphan_shares/shares)/(1 - global_stale_prop) if shares else None, # ignores dead shares because those are miner's fault and indicated by pseudoshare rejection
            efficiency=(1 - (stale_orphan_shares+stale_doa_shares)/shares)/(1 - global_stale_prop) if shares else None,
            peers=dict(
                incoming=sum(1 for peer in p2p_node.peers.itervalues() if peer.incoming),
                outgoing=sum(1 for peer in p2p_node.peers.itervalues() if not peer.incoming),
            ),
            shares=dict(
                total=shares,
                orphan=stale_orphan_shares,
                dead=stale_doa_shares,
            ),
            uptime=time.time() - start_time,
            attempts_to_share=bitcoin_data.target_to_average_attempts(tracker.items[best_share_var.value].max_target),
            attempts_to_block=bitcoin_data.target_to_average_attempts(bitcoind_work.value['bits'].target),
            block_value=bitcoind_work.value['subsidy']*1e-8,
            warnings=p2pool_data.get_warnings(tracker, best_share_var.value, net, bitcoin_warning_var.value, bitcoind_work.value),
        )
    
    class WebInterface(deferred_resource.DeferredResource):
        def __init__(self, func, mime_type='application/json', args=()):
            deferred_resource.DeferredResource.__init__(self)
            self.func, self.mime_type, self.args = func, mime_type, args
        
        def getChild(self, child, request):
            return WebInterface(self.func, self.mime_type, self.args + (child,))
        
        @defer.inlineCallbacks
        def render_GET(self, request):
            request.setHeader('Content-Type', self.mime_type)
            request.setHeader('Access-Control-Allow-Origin', '*')
            res = yield self.func(*self.args)
            defer.returnValue(json.dumps(res) if self.mime_type == 'application/json' else res)
    
    web_root.putChild('rate', WebInterface(lambda: p2pool_data.get_pool_attempts_per_second(tracker, best_share_var.value, 720)/(1-p2pool_data.get_average_stale_prop(tracker, best_share_var.value, 720))))
    web_root.putChild('difficulty', WebInterface(lambda: bitcoin_data.target_to_difficulty(tracker.items[best_share_var.value].max_target)))
    web_root.putChild('users', WebInterface(get_users))
    web_root.putChild('user_stales', WebInterface(lambda: dict((bitcoin_data.pubkey_hash_to_address(ph, net.PARENT), prop) for ph, prop in
        p2pool_data.get_user_stale_props(tracker, best_share_var.value, tracker.get_height(best_share_var.value)).iteritems())))
    web_root.putChild('fee', WebInterface(lambda: worker_fee))
    web_root.putChild('current_payouts', WebInterface(lambda: dict((bitcoin_data.script2_to_address(script, net.PARENT), value/1e8) for script, value in get_current_txouts().iteritems())))
    web_root.putChild('patron_sendmany', WebInterface(get_patron_sendmany, 'text/plain'))
    web_root.putChild('global_stats', WebInterface(get_global_stats))
    web_root.putChild('local_stats', WebInterface(get_local_stats))
    web_root.putChild('peer_addresses', WebInterface(lambda: ['%s:%i' % (peer.transport.getPeer().host, peer.transport.getPeer().port) for peer in p2p_node.peers.itervalues()]))
    web_root.putChild('pings', WebInterface(defer.inlineCallbacks(lambda: defer.returnValue(
        dict([(a, (yield b)) for a, b in
            [(
                '%s:%i' % (peer.transport.getPeer().host, peer.transport.getPeer().port),
                defer.inlineCallbacks(lambda peer=peer: defer.returnValue(
                    min([(yield peer.do_ping().addCallback(lambda x: x/0.001).addErrback(lambda fail: None)) for i in xrange(3)])
                ))()
            ) for peer in list(p2p_node.peers.itervalues())]
        ])
    ))))
    web_root.putChild('peer_versions', WebInterface(lambda: dict(('%s:%i' % peer.addr, peer.other_sub_version) for peer in p2p_node.peers.itervalues())))
    web_root.putChild('payout_addr', WebInterface(lambda: bitcoin_data.pubkey_hash_to_address(my_pubkey_hash, net.PARENT)))
    web_root.putChild('recent_blocks', WebInterface(lambda: [dict(ts=s.timestamp, hash='%064x' % s.header_hash) for s in tracker.get_chain(best_share_var.value, 24*60*60//net.SHARE_PERIOD) if s.pow_hash <= s.header['bits'].target]))
    web_root.putChild('uptime', WebInterface(lambda: time.time() - start_time))
    web_root.putChild('stale_rates', WebInterface(lambda: p2pool_data.get_stale_counts(tracker, best_share_var.value, 720, rates=True)))
    
    new_root = resource.Resource()
    web_root.putChild('web', new_root)
    
    stat_log = []
    if os.path.exists(os.path.join(datadir_path, 'stats')):
        try:
            with open(os.path.join(datadir_path, 'stats'), 'rb') as f:
                stat_log = json.loads(f.read())
        except Exception:
            log.err(None, 'Error loading stats:')
    def update_stat_log():
        while stat_log and stat_log[0]['time'] < time.time() - 24*60*60:
            stat_log.pop(0)
        
        lookbehind = 3600//net.SHARE_PERIOD
        if tracker.get_height(best_share_var.value) < lookbehind:
            return None
        
        global_stale_prop = p2pool_data.get_average_stale_prop(tracker, best_share_var.value, lookbehind)
        (stale_orphan_shares, stale_doa_shares), shares, _ = get_stale_counts()
        miner_hash_rates, miner_dead_hash_rates = get_local_rates()
        
        stat_log.append(dict(
            time=time.time(),
            pool_hash_rate=p2pool_data.get_pool_attempts_per_second(tracker, best_share_var.value, lookbehind)/(1-global_stale_prop),
            pool_stale_prop=global_stale_prop,
            local_hash_rates=miner_hash_rates,
            local_dead_hash_rates=miner_dead_hash_rates,
            shares=shares,
            stale_shares=stale_orphan_shares + stale_doa_shares,
            stale_shares_breakdown=dict(orphan=stale_orphan_shares, doa=stale_doa_shares),
            current_payout=get_current_txouts().get(bitcoin_data.pubkey_hash_to_script2(my_pubkey_hash), 0)*1e-8,
            peers=dict(
                incoming=sum(1 for peer in p2p_node.peers.itervalues() if peer.incoming),
                outgoing=sum(1 for peer in p2p_node.peers.itervalues() if not peer.incoming),
            ),
            attempts_to_share=bitcoin_data.target_to_average_attempts(tracker.items[best_share_var.value].max_target),
            attempts_to_block=bitcoin_data.target_to_average_attempts(bitcoind_work.value['bits'].target),
            block_value=bitcoind_work.value['subsidy']*1e-8,
        ))
        
        with open(os.path.join(datadir_path, 'stats'), 'wb') as f:
            f.write(json.dumps(stat_log))
    task.LoopingCall(update_stat_log).start(5*60)
    new_root.putChild('log', WebInterface(lambda: stat_log))
    
    def get_share(share_hash_str):
        if int(share_hash_str, 16) not in tracker.items:
            return None
        share = tracker.items[int(share_hash_str, 16)]
        
        return dict(
            parent='%064x' % share.previous_hash,
            children=['%064x' % x for x in sorted(tracker.reverse.get(share.hash, set()), key=lambda sh: -len(tracker.reverse.get(sh, set())))], # sorted from most children to least children
            local=dict(
                verified=share.hash in tracker.verified.items,
                time_first_seen=start_time if share.time_seen == 0 else share.time_seen,
                peer_first_received_from=share.peer.addr if share.peer is not None else None,
            ),
            share_data=dict(
                timestamp=share.timestamp,
                target=share.target,
                max_target=share.max_target,
                payout_address=bitcoin_data.script2_to_address(share.new_script, net.PARENT),
                donation=share.share_data['donation']/65535,
                stale_info=share.share_data['stale_info'],
                nonce=share.share_data['nonce'],
                desired_version=share.share_data['desired_version'],
            ),
            block=dict(
                hash='%064x' % share.header_hash,
                header=dict(
                    version=share.header['version'],
                    previous_block='%064x' % share.header['previous_block'],
                    merkle_root='%064x' % share.header['merkle_root'],
                    timestamp=share.header['timestamp'],
                    target=share.header['bits'].target,
                    nonce=share.header['nonce'],
                ),
                gentx=dict(
                    hash='%064x' % share.gentx_hash,
                    coinbase=share.share_data['coinbase'].ljust(2, '\x00').encode('hex'),
                    value=share.share_data['subsidy']*1e-8,
                ),
                txn_count_range=[len(share.other_txs), len(share.other_txs)] if share.other_txs is not None else 1 if len(share.merkle_link['branch']) == 0 else [2**len(share.merkle_link['branch'])//2+1, 2**len(share.merkle_link['branch'])],
            ),
        )
    new_root.putChild('share', WebInterface(lambda share_hash_str: get_share(share_hash_str)))
    new_root.putChild('heads', WebInterface(lambda: ['%064x' % x for x in tracker.heads]))
    new_root.putChild('verified_heads', WebInterface(lambda: ['%064x' % x for x in tracker.verified.heads]))
    new_root.putChild('tails', WebInterface(lambda: ['%064x' % x for t in tracker.tails for x in tracker.reverse.get(t, set())]))
    new_root.putChild('verified_tails', WebInterface(lambda: ['%064x' % x for t in tracker.verified.tails for x in tracker.verified.reverse.get(t, set())]))
    new_root.putChild('best_share_hash', WebInterface(lambda: '%064x' % best_share_var.value))
    def get_share_data(share_hash_str):
        if int(share_hash_str, 16) not in tracker.items:
            return ''
        share = tracker.items[int(share_hash_str, 16)]
        return p2pool_data.share_type.pack(share.as_share1a())
    new_root.putChild('share_data', WebInterface(lambda share_hash_str: get_share_data(share_hash_str), 'application/octet-stream'))
    new_root.putChild('currency_info', WebInterface(lambda: dict(
        symbol=net.PARENT.SYMBOL,
        block_explorer_url_prefix=net.PARENT.BLOCK_EXPLORER_URL_PREFIX,
        address_explorer_url_prefix=net.PARENT.ADDRESS_EXPLORER_URL_PREFIX,
    )))
    new_root.putChild('version', WebInterface(lambda: p2pool.__version__))
    
    hd_path = os.path.join(datadir_path, 'graph_db')
    hd_data = _atomic_read(hd_path)
    hd_obj = {}
    if hd_data is not None:
        try:
            hd_obj = json.loads(hd_data)
        except Exception:
            log.err(None, 'Error reading graph database:')
    dataview_descriptions = {
        'last_hour': graph.DataViewDescription(150, 60*60),
        'last_day': graph.DataViewDescription(300, 60*60*24),
        'last_week': graph.DataViewDescription(300, 60*60*24*7),
        'last_month': graph.DataViewDescription(300, 60*60*24*30),
        'last_year': graph.DataViewDescription(300, 60*60*24*365.25),
    }
    def build_pool_rates(ds_name, ds_desc, dv_name, dv_desc, obj):
        if not obj:
            last_bin_end = 0
            bins = dv_desc.bin_count*[{}]
        else:
            pool_rate = obj['pool_rate'][dv_name]
            pool_stale_rate = obj['pool_stale_rate'][dv_name]
            last_bin_end = max(pool_rate['last_bin_end'], pool_stale_rate['last_bin_end'])
            bins = dv_desc.bin_count*[{}]
            def get_value(obj, t):
                n = int((obj['last_bin_end'] - t)/dv_desc.bin_width)
                if n < 0 or n >= dv_desc.bin_count:
                    return None, 0
                total, count = obj['bins'][n].get('null', [0, 0])
                if count == 0:
                    return None, 0
                return total/count, count
            def get_bin(t):
                total, total_count = get_value(pool_rate, t)
                bad, bad_count = get_value(pool_stale_rate, t)
                if total is None or bad is None:
                    return {}
                count = int((total_count+bad_count)/2+1/2)
                return dict(good=[(total-bad)*count, count], bad=[bad*count, count], null=[0, count])
            bins = [get_bin(last_bin_end - (i+1/2)*dv_desc.bin_width) for i in xrange(dv_desc.bin_count)]
        return graph.DataView(dv_desc, ds_desc, last_bin_end, bins)
    hd = graph.HistoryDatabase.from_obj({
        'local_hash_rate': graph.DataStreamDescription(dataview_descriptions, is_gauge=False),
        'local_dead_hash_rate': graph.DataStreamDescription(dataview_descriptions, is_gauge=False),
        'local_share_hash_rate': graph.DataStreamDescription(dataview_descriptions, is_gauge=False),
        'local_dead_share_hash_rate': graph.DataStreamDescription(dataview_descriptions, is_gauge=False),
        'pool_rates': graph.DataStreamDescription(dataview_descriptions, multivalues=True,
            multivalue_undefined_means_0=True, default_func=build_pool_rates),
        'current_payout': graph.DataStreamDescription(dataview_descriptions),
        'current_payouts': graph.DataStreamDescription(dataview_descriptions, multivalues=True),
        'incoming_peers': graph.DataStreamDescription(dataview_descriptions),
        'outgoing_peers': graph.DataStreamDescription(dataview_descriptions),
        'miner_hash_rates': graph.DataStreamDescription(dataview_descriptions, is_gauge=False, multivalues=True),
        'miner_dead_hash_rates': graph.DataStreamDescription(dataview_descriptions, is_gauge=False, multivalues=True),
        'desired_versions': graph.DataStreamDescription(dataview_descriptions, multivalues=True,
            multivalue_undefined_means_0=True),
        'traffic_rate': graph.DataStreamDescription(dataview_descriptions, is_gauge=False, multivalues=True),
    }, hd_obj)
    task.LoopingCall(lambda: _atomic_write(hd_path, json.dumps(hd.to_obj()))).start(100)
    @pseudoshare_received.watch
    def _(work, dead, user):
        t = time.time()
        hd.datastreams['local_hash_rate'].add_datum(t, work)
        if dead:
            hd.datastreams['local_dead_hash_rate'].add_datum(t, work)
        if user is not None:
            hd.datastreams['miner_hash_rates'].add_datum(t, {user: work})
            if dead:
                hd.datastreams['miner_dead_hash_rates'].add_datum(t, {user: work})
    @share_received.watch
    def _(work, dead):
        t = time.time()
        hd.datastreams['local_share_hash_rate'].add_datum(t, work)
        if dead:
            hd.datastreams['local_dead_share_hash_rate'].add_datum(t, work)
    @traffic_happened.watch
    def _(name, bytes):
        hd.datastreams['traffic_rate'].add_datum(time.time(), {name: bytes})
    def add_point():
        if tracker.get_height(best_share_var.value) < 720:
            return
        t = time.time()
        hd.datastreams['pool_rates'].add_datum(t, p2pool_data.get_stale_counts(tracker, best_share_var.value, 720, rates=True))
        current_txouts = get_current_txouts()
        hd.datastreams['current_payout'].add_datum(t, current_txouts.get(bitcoin_data.pubkey_hash_to_script2(my_pubkey_hash), 0)*1e-8)
        miner_hash_rates, miner_dead_hash_rates = get_local_rates()
        current_txouts_by_address = dict((bitcoin_data.script2_to_address(script, net.PARENT), amount) for script, amount in current_txouts.iteritems())
        hd.datastreams['current_payouts'].add_datum(t, dict((user, current_txouts_by_address[user]*1e-8) for user in miner_hash_rates if user in current_txouts_by_address))
        hd.datastreams['incoming_peers'].add_datum(t, sum(1 for peer in p2p_node.peers.itervalues() if peer.incoming))
        hd.datastreams['outgoing_peers'].add_datum(t, sum(1 for peer in p2p_node.peers.itervalues() if not peer.incoming))
        
        vs = p2pool_data.get_desired_version_counts(tracker, best_share_var.value, 720)
        vs_total = sum(vs.itervalues())
        hd.datastreams['desired_versions'].add_datum(t, dict((str(k), v/vs_total) for k, v in vs.iteritems()))
    task.LoopingCall(add_point).start(5)
    new_root.putChild('graph_data', WebInterface(lambda source, view: hd.datastreams[source].dataviews[view].get_data(time.time())))
    
    web_root.putChild('static', static.File(os.path.join(os.path.dirname(sys.argv[0]), 'web-static')))
    
    return web_root
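Note: the WebInterface class above is p2pool-specific plumbing, but the underlying idea is just a leaf resource that calls a function and renders the result as JSON; a generic sketch:

import json
import time

from twisted.web import resource

class JsonLeaf(resource.Resource):
    isLeaf = True

    def __init__(self, func):
        resource.Resource.__init__(self)
        self.func = func

    def render_GET(self, request):
        request.setHeader('Content-Type', 'application/json')
        return json.dumps(self.func())

# e.g. web_root.putChild('uptime', JsonLeaf(lambda: time.time() - start_time))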
Example 13
    def register_art_url(self, url, cloud=False):
        if cloud:
            return None
        newurl = hashlib.md5(url).hexdigest() + url[-4:]
        self.resource.putChild(newurl, static.File(url))
        return self.webserver.weburl % get_default_v4_address() + '/' + newurl
Example 14
        params = json.loads(request.content.getvalue())
        cluster = clusters[int(params['cluster'])]
        if 'controller' in params:
            newDev = cluster.devices[int(params['controller'])]
            if cluster.activeDevice != newDev:
                cluster.setActiveDevice(newDev)
        if 'combo' in params:
            combo = tuple(params['combo'])
            switch = [x for x in switches if x.combo == combo][0]
            cluster.onSwitchButton(switch)
        return "OK"


class WebData(Resource):
    isLeaf = True

    def render_GET(self, request):
        request.responseHeaders.addRawHeader(b"content-type",
                                             b"application/json")
        return json.dumps(conf['devices'])


siteRoot = static.File(appPath + '\\web')  # escape the backslash in the Windows path
siteRoot.putChild('cluster', WebCluster())
siteRoot.putChild('data', WebData())
init()

site = Site(siteRoot)
reactor.listenTCP(conf['http_port'], site)
reactor.run()
Example 15
    def render(self, request):
        action = "download"
        if "action" in request.args:
            action = request.args["action"][0]

        if "file" in request.args:
            filename = unquote(request.args["file"][0]).decode(
                'utf-8', 'ignore').encode('utf-8')

            if not os.path.exists(filename):
                return "File '%s' not found" % (filename)

            name = "stream"
            if "name" in request.args:
                name = request.args["name"][0]
            if action == "stream":
                response = "#EXTM3U\n#EXTVLCOPT--http-reconnect=true\n#EXTINF:-1,%s\nhttp://%s:%s/file?action=download&file=%s" % (
                    name, request.getRequestHostname(),
                    config.OpenWebif.port.value, quote(filename))
                request.setHeader("Content-Disposition:",
                                  'attachment;filename="%s.m3u"' % name)
                request.setHeader("Content-Type:", "audio/mpegurl")
                return response
            elif action == "delete":
                request.setResponseCode(http.OK)
                return "TODO: DELETE FILE: %s" % (filename)
            elif action == "download":
                request.setHeader(
                    "Content-Disposition",
                    "attachment;filename=\"%s\"" % (filename.split('/')[-1]))
                rfile = static.File(filename,
                                    defaultType="application/octet-stream")
                return rfile.render(request)
            else:
                return "wrong action parameter"

        if "dir" in request.args:
            path = request.args["dir"][0]

            if "pattern" in request.args:
                pattern = request.args["pattern"][0]
            else:
                pattern = None

            directories = []
            files = []
            if fileExists(path):
                try:
                    files = listdir(path)
                except OSError:
                    files = []

                files.sort()
                tmpfiles = files[:]
                for x in tmpfiles:
                    if os_path.isdir(path + x):
                        directories.append(path + x + "/")
                        files.remove(x)

                data = []
                data.append({
                    "result": True,
                    "dirs": directories,
                    "files": files
                })

                request.setHeader("content-type", "text/plain")
                request.write(json.dumps(data))
                request.finish()
                return server.NOT_DONE_YET
            else:
                return "path %s not exits" % (path)
Example 16
class ResultsPage(athena.LivePage):
    """default results page"""
    child_maaycss = static.File(get_path_of('maay.css'))
    child_images = static.File(get_path_of('images/'))
    docFactory = loaders.xmlfile(get_path_of('liveresults.html'))
    addSlash = False

    instances = []

    def __init__(self, maayId, results, query, offset):
        athena.LivePage.__init__(self)
        self.maayId = maayId
        self.results = results
        self.offset = offset
        self.query = query.words  # unicode(query)

    def data_results(self, context, data):
        return self.results

    def render_title(self, context, data):
        context.fillSlots('words', self.query)
        context.fillSlots('start_result',
                          min(len(self.results), self.offset + 1))
        context.fillSlots('end_result', self.offset + len(self.results))
        return context.tag

    def render_searchfield(self, context, data):
        context.fillSlots('words', self.query)
        return context.tag

    def render_prevset_url(self, context, data):
        words = WORDS_RGX.findall(
            normalizeText(unicode(context.arg('words'), 'utf-8')))
        offset = int(context.arg('offset', 0))
        if offset:
            offset -= 15
        return 'search?words=%s&offset=%s' % ('+'.join(words), offset)

    def render_nextset_url(self, context, data):
        words = WORDS_RGX.findall(
            normalizeText(unicode(context.arg('words'), 'utf-8')))
        offset = int(context.arg('offset', 0)) + 15
        return 'search?words=%s&offset=%s' % ('+'.join(words), offset)

    def render_row(self, context, data):
        document = data
        words = self.query.split()
        context.fillSlots('mime_type', re.sub("/", "_", document.mime_type))
        context.fillSlots('doctitle',
                          tags.xml(boldifyText(document.title, words)))
        # XXX abstract attribute should be a unicode string
        try:
            abstract = makeAbstract(document.text, words)
            abstract = normalizeText(unicode(abstract))
        except Exception, exc:
            import traceback
            traceback.print_exc()
            print exc
            abstract = u'No abstract available for this document [%s]' % exc
        context.fillSlots('abstract', tags.xml(abstract))
        context.fillSlots('docid', document.db_document_id)
        context.fillSlots('docurl', tags.xml(boldifyText(document.url, words)))
        context.fillSlots('words', self.query)
        context.fillSlots('readable_size', document.readable_size())
        date = datetime.fromtimestamp(document.publication_time)
        context.fillSlots('publication_date', date.strftime('%d %b %Y'))
        return context.tag
Example 17
def _cb_deferred(ret, request, p_ctx, others, resource, cb=True):
    ### set response headers
    resp_code = p_ctx.transport.resp_code

    # If user code set its own response code, don't touch it.
    if resp_code is None:
        resp_code = HTTP_200
    request.setResponseCode(int(resp_code[:3]))

    _set_response_headers(request, p_ctx.transport.resp_headers)

    ### normalize response data
    om = p_ctx.descriptor.out_message
    single_class = None
    if cb:
        if p_ctx.descriptor.is_out_bare():
            p_ctx.out_object = [ret]

        elif (not issubclass(om, ComplexModelBase)) or len(om._type_info) <= 1:
            p_ctx.out_object = [ret]
            if len(om._type_info) == 1:
                single_class, = om._type_info.values()
        else:
            p_ctx.out_object = ret
    else:
        p_ctx.out_object = ret

    ### start response
    retval = NOT_DONE_YET

    if isinstance(ret, PushBase):
        pass

    elif ((isclass(om) and issubclass(om, File)) or
          (isclass(single_class) and issubclass(single_class, File))) and \
         isinstance(p_ctx.out_protocol, HttpRpc) and \
                                      getattr(ret, 'abspath', None) is not None:

        file = static.File(ret.abspath,
                           defaultType=str(ret.type) or 'application/octet-stream')
        retval = _render_file(file, request)
        if retval != NOT_DONE_YET and cb:
            request.write(retval)
            request.finish()
            p_ctx.close()
        else:
            def _close_only_context(ret):
                p_ctx.close()

            request.notifyFinish().addCallback(_close_only_context)
            request.notifyFinish().addErrback(_eb_request_finished, request, p_ctx)

    else:
        ret = resource.http_transport.get_out_string(p_ctx)

        if not isinstance(ret, Deferred):
            producer = Producer(p_ctx.out_string, request)
            producer.deferred.addCallback(_cb_request_finished, request, p_ctx)
            producer.deferred.addErrback(_eb_request_finished, request, p_ctx)

            try:
                request.registerProducer(producer, False)
            except Exception as e:
                logger_server.exception(e)
                _eb_deferred(Failure(), request, p_ctx, others, resource)

        else:
            def _cb(ret):
                if isinstance(ret, Deferred):
                    return ret \
                        .addCallback(_cb) \
                        .addErrback(_eb_request_finished, request, p_ctx)
                else:
                    return _cb_request_finished(ret, request, p_ctx)

            ret \
                .addCallback(_cb) \
                .addErrback(_eb_request_finished, request, p_ctx)

    process_contexts(resource.http_transport, others, p_ctx)

    return retval
Example 18
    def __init__(self, listeningPort):
        resource.Resource.__init__(self)
        self.listeningPort = listeningPort
        self.static_files = static.File(
            os.path.join(os.path.dirname(__file__), 'static'))
        self.connections = {}
Example 19
	def setPiconChild(self, pp):
		self.putChild2("picon", static.File(six.ensure_binary(pp)))
Example 20
if __name__ == '__main__':
	import argparse
	import twisted.web.resource

	class SlideshowResource(twisted.web.resource.Resource):
		parser = argparse.ArgumentParser()
		parser.add_argument('slideshow', nargs=1)
		parser.add_argument('static_files', nargs=1)
		options = parser.parse_args()
		slideshow = options.slideshow[0]
		static_files = options.static_files[0]

		def render_GET(self, request):
			with open(self.slideshow) as f:
				slideshow = read_slides(f)

			slideshow = [make_slide(slide) for slide in slideshow]

			return make_document(slideshow).encode('utf-8')

	from twisted.internet import reactor
	from twisted.web import static, server
	root = static.File(SlideshowResource.static_files)
	root.putChild('', SlideshowResource())
	import os
	port = int(os.getenv('PORT', 8080))
	reactor.listenTCP(port, server.Site(root))
	reactor.run()


Example 21
def get_web_root(wb,
                 datadir_path,
                 dashd_getnetworkinfo_var,
                 stop_event=variable.Event(),
                 static_dir=None):
    node = wb.node
    start_time = time.time()

    web_root = resource.Resource()

    def get_users():
        height, last = node.tracker.get_height_and_last(
            node.best_share_var.value)
        weights, total_weight, donation_weight = node.tracker.get_cumulative_weights(
            node.best_share_var.value, min(height, 720), 65535 * 2**256)
        res = {}
        for script in sorted(weights, key=lambda s: weights[s]):
            res[dash_data.script2_to_address(
                script, node.net.PARENT)] = weights[script] / total_weight
        return res

    def get_current_scaled_txouts(scale, trunc=0):
        txouts = node.get_current_txouts()
        total = sum(txouts.itervalues())
        results = dict((script, value * scale // total)
                       for script, value in txouts.iteritems())
        if trunc > 0:
            total_random = 0
            random_set = set()
            for s in sorted(results, key=results.__getitem__):
                if results[s] >= trunc:
                    break
                total_random += results[s]
                random_set.add(s)
            if total_random:
                winner = math.weighted_choice(
                    (script, results[script]) for script in random_set)
                for script in random_set:
                    del results[script]
                results[winner] = total_random
        if sum(results.itervalues()) < int(scale):
            results[math.weighted_choice(
                results.iteritems())] += int(scale) - sum(results.itervalues())
        return results

    def get_patron_sendmany(total=None, trunc='0.01'):
        if total is None:
            return 'need total argument. go to patron_sendmany/<TOTAL>'
        total = int(float(total) * 1e8)
        trunc = int(float(trunc) * 1e8)
        return json.dumps(
            dict(
                (dash_data.script2_to_address(script, node.net.PARENT),
                 value / 1e8) for script, value in get_current_scaled_txouts(
                     total, trunc).iteritems() if dash_data.script2_to_address(
                         script, node.net.PARENT) is not None))

    def get_global_stats():
        # averaged over last hour
        if node.tracker.get_height(node.best_share_var.value) < 10:
            return None
        lookbehind = min(node.tracker.get_height(node.best_share_var.value),
                         3600 // node.net.SHARE_PERIOD)

        nonstale_hash_rate = p2pool_data.get_pool_attempts_per_second(
            node.tracker, node.best_share_var.value, lookbehind)
        stale_prop = p2pool_data.get_average_stale_prop(
            node.tracker, node.best_share_var.value, lookbehind)
        diff = dash_data.target_to_difficulty(
            wb.current_work.value['bits'].target)
        return dict(
            pool_nonstale_hash_rate=nonstale_hash_rate,
            pool_hash_rate=nonstale_hash_rate / (1 - stale_prop),
            pool_stale_prop=stale_prop,
            min_difficulty=dash_data.target_to_difficulty(
                node.tracker.items[node.best_share_var.value].max_target),
            network_block_difficulty=diff,
            network_hashrate=(diff * 2**32 // node.net.PARENT.BLOCK_PERIOD),
        )

    def get_local_stats():
        if node.tracker.get_height(node.best_share_var.value) < 10:
            return None
        lookbehind = min(node.tracker.get_height(node.best_share_var.value),
                         3600 // node.net.SHARE_PERIOD)

        global_stale_prop = p2pool_data.get_average_stale_prop(
            node.tracker, node.best_share_var.value, lookbehind)

        my_unstale_count = sum(1 for share in node.tracker.get_chain(
            node.best_share_var.value, lookbehind)
                               if share.hash in wb.my_share_hashes)
        my_orphan_count = sum(1 for share in node.tracker.get_chain(
            node.best_share_var.value, lookbehind)
                              if share.hash in wb.my_share_hashes
                              and share.share_data['stale_info'] == 'orphan')
        my_doa_count = sum(1 for share in node.tracker.get_chain(
            node.best_share_var.value, lookbehind)
                           if share.hash in wb.my_share_hashes
                           and share.share_data['stale_info'] == 'doa')
        my_share_count = my_unstale_count + my_orphan_count + my_doa_count
        my_stale_count = my_orphan_count + my_doa_count

        my_stale_prop = my_stale_count / my_share_count if my_share_count != 0 else None

        my_work = sum(
            dash_data.target_to_average_attempts(share.target) for share in
            node.tracker.get_chain(node.best_share_var.value, lookbehind - 1)
            if share.hash in wb.my_share_hashes)
        actual_time = (
            node.tracker.items[node.best_share_var.value].timestamp -
            node.tracker.items[node.tracker.get_nth_parent_hash(
                node.best_share_var.value, lookbehind - 1)].timestamp)
        share_att_s = my_work / actual_time

        miner_hash_rates, miner_dead_hash_rates = wb.get_local_rates()
        (stale_orphan_shares,
         stale_doa_shares), shares, _ = wb.get_stale_counts()

        miner_last_difficulties = {}
        for addr in wb.last_work_shares.value:
            miner_last_difficulties[addr] = dash_data.target_to_difficulty(
                wb.last_work_shares.value[addr].target)

        return dict(
            my_hash_rates_in_last_hour=dict(
                note="DEPRECATED",
                nonstale=share_att_s,
                rewarded=share_att_s / (1 - global_stale_prop),
                actual=share_att_s /
                (1 - my_stale_prop) if my_stale_prop is not None else
                0,  # 0 because we don't have any shares anyway
            ),
            my_share_counts_in_last_hour=dict(
                shares=my_share_count,
                unstale_shares=my_unstale_count,
                stale_shares=my_stale_count,
                orphan_stale_shares=my_orphan_count,
                doa_stale_shares=my_doa_count,
            ),
            my_stale_proportions_in_last_hour=dict(
                stale=my_stale_prop,
                orphan_stale=my_orphan_count /
                my_share_count if my_share_count != 0 else None,
                dead_stale=my_doa_count /
                my_share_count if my_share_count != 0 else None,
            ),
            miner_hash_rates=miner_hash_rates,
            miner_dead_hash_rates=miner_dead_hash_rates,
            miner_last_difficulties=miner_last_difficulties,
            efficiency_if_miner_perfect=(1 - stale_orphan_shares / shares) /
            (1 - global_stale_prop) if shares else
            None,  # ignores dead shares because those are miner's fault and indicated by pseudoshare rejection
            efficiency=(1 -
                        (stale_orphan_shares + stale_doa_shares) / shares) /
            (1 - global_stale_prop) if shares else None,
            peers=dict(
                incoming=sum(1 for peer in node.p2p_node.peers.itervalues()
                             if peer.incoming),
                outgoing=sum(1 for peer in node.p2p_node.peers.itervalues()
                             if not peer.incoming),
            ),
            shares=dict(
                total=shares,
                orphan=stale_orphan_shares,
                dead=stale_doa_shares,
            ),
            uptime=time.time() - start_time,
            attempts_to_share=dash_data.target_to_average_attempts(
                node.tracker.items[node.best_share_var.value].max_target),
            attempts_to_block=dash_data.target_to_average_attempts(
                node.dashd_work.value['bits'].target),
            block_value=node.dashd_work.value['subsidy'] * 1e-8,
            warnings=p2pool_data.get_warnings(node.tracker,
                                              node.best_share_var.value,
                                              node.net,
                                              dashd_getnetworkinfo_var.value,
                                              node.dashd_work.value),
            donation_proportion=wb.donation_percentage / 100,
            version=p2pool.__version__,
            protocol_version=p2p.Protocol.VERSION,
            fee=wb.worker_fee,
        )

    class WebInterface(deferred_resource.DeferredResource):
        def __init__(self, func, mime_type='application/json', args=()):
            deferred_resource.DeferredResource.__init__(self)
            self.func, self.mime_type, self.args = func, mime_type, args

        def getChild(self, child, request):
            return WebInterface(self.func, self.mime_type,
                                self.args + (child, ))

        @defer.inlineCallbacks
        def render_GET(self, request):
            request.setHeader('Content-Type', self.mime_type)
            request.setHeader('Access-Control-Allow-Origin', '*')
            res = yield self.func(*self.args)
            defer.returnValue(
                json.dumps(res) if self.mime_type ==
                'application/json' else res)
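
    # Note: WebInterface (above) adapts any callable, including one that
    # returns a Deferred, into a JSON GET endpoint; getChild accumulates
    # the remaining URL path segments and passes them to the callable as
    # positional arguments. A minimal sketch of that flow, with
    # illustrative names:
    #
    #   wi = WebInterface(lambda name: {'hello': name})
    #   wi2 = wi.getChild('world', None)     # args becomes ('world',)
    #   # wi2.render_GET(request) then calls the wrapped function as
    #   # func('world') and writes the JSON-encoded result.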

    def decent_height():
        return min(node.tracker.get_height(node.best_share_var.value), 720)

    web_root.putChild(
        'rate',
        WebInterface(lambda: p2pool_data.get_pool_attempts_per_second(
            node.tracker, node.best_share_var.value, decent_height(
            )) / (1 - p2pool_data.get_average_stale_prop(
                node.tracker, node.best_share_var.value, decent_height()))))
    web_root.putChild(
        'difficulty',
        WebInterface(lambda: dash_data.target_to_difficulty(node.tracker.items[
            node.best_share_var.value].max_target)))
    web_root.putChild('users', WebInterface(get_users))
    web_root.putChild(
        'user_stales',
        WebInterface(lambda: dict((dash_data.pubkey_hash_to_address(
            ph, node.net.PARENT
        ), prop) for ph, prop in p2pool_data.get_user_stale_props(
            node.tracker, node.best_share_var.value,
            node.tracker.get_height(node.best_share_var.value)).iteritems())))
    web_root.putChild('fee', WebInterface(lambda: wb.worker_fee))
    web_root.putChild(
        'current_payouts',
        WebInterface(lambda: dict(
            (dash_data.script2_to_address(script, node.net.PARENT), value / 1e8
             ) for script, value in node.get_current_txouts().iteritems())))
    web_root.putChild('patron_sendmany',
                      WebInterface(get_patron_sendmany, 'text/plain'))
    web_root.putChild('global_stats', WebInterface(get_global_stats))
    web_root.putChild('local_stats', WebInterface(get_local_stats))
    web_root.putChild(
        'peer_addresses',
        WebInterface(lambda: ' '.join('%s%s' % (
            peer.transport.getPeer().host, ':' + str(peer.transport.getPeer(
            ).port) if peer.transport.getPeer().port != node.net.P2P_PORT else
            '') for peer in node.p2p_node.peers.itervalues())))
    web_root.putChild(
        'peer_txpool_sizes',
        WebInterface(lambda: dict(('%s:%i' % (peer.transport.getPeer(
        ).host, peer.transport.getPeer().port), peer.remembered_txs_size) for
                                  peer in node.p2p_node.peers.itervalues())))
    web_root.putChild(
        'pings',
        WebInterface(
            defer.inlineCallbacks(lambda: defer.returnValue(
                dict([(a, (yield b)) for a, b in [(
                    '%s:%i' % (peer.transport.getPeer().host,
                               peer.transport.getPeer().port),
                    defer.inlineCallbacks(lambda peer=peer: defer.returnValue(
                        min([(yield peer.do_ping().addCallback(
                            lambda x: x / 0.001).addErrback(lambda fail: None))
                             for i in xrange(3)])))()) for peer in list(
                                 node.p2p_node.peers.itervalues())]])))))
    web_root.putChild(
        'peer_versions',
        WebInterface(lambda: dict(
            ('%s:%i' % peer.addr, peer.other_sub_version)
            for peer in node.p2p_node.peers.itervalues())))
    web_root.putChild(
        'payout_addr',
        WebInterface(lambda: dash_data.pubkey_hash_to_address(
            wb.my_pubkey_hash, node.net.PARENT)))
    web_root.putChild(
        'payout_addrs',
        WebInterface(lambda: list(('%s' % dash_data.pubkey_hash_to_address(
            add, node.net.PARENT)) for add in wb.pubkeys.keys)))

    def height_from_coinbase(coinbase):
        opcode = ord(coinbase[0]) if len(coinbase) > 0 else 0
        if opcode >= 1 and opcode <= 75:
            return pack.IntType(opcode * 8).unpack(coinbase[1:opcode + 1])
        if opcode == 76:
            return pack.IntType(8).unpack(coinbase[1:2])
        if opcode == 77:
            return pack.IntType(16).unpack(coinbase[1:3])
        if opcode == 78:
            return pack.IntType(32).unpack(coinbase[1:5])
        if opcode >= 79 and opcode <= 96:
            return opcode - 80
        return None
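
    # For reference (BIP 34): the block height is the first push in the
    # coinbase script, little-endian. A hedged worked example, assuming
    # the little-endian pack module above and Python 2 byte-strings:
    # '\x03\x0f\x27\x06' pushes 3 bytes, so
    # height_from_coinbase('\x03\x0f\x27\x06' + '...') == 0x06270f == 403215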

    web_root.putChild(
        'recent_blocks',
        WebInterface(lambda: [
            dict(
                ts=s.timestamp,
                hash='%064x' % s.header_hash,
                number=height_from_coinbase(s.share_data['coinbase']),
                share='%064x' % s.hash,
            ) for s in node.tracker.get_chain(
                node.best_share_var.value,
                min(node.tracker.get_height(node.best_share_var.value), 24 * 60
                    * 60 // node.net.SHARE_PERIOD))
            if s.pow_hash <= s.header['bits'].target
        ]))
    web_root.putChild('uptime', WebInterface(lambda: time.time() - start_time))
    web_root.putChild(
        'stale_rates',
        WebInterface(
            lambda: p2pool_data.get_stale_counts(node.tracker,
                                                 node.best_share_var.value,
                                                 decent_height(),
                                                 rates=True)))

    new_root = resource.Resource()
    web_root.putChild('web', new_root)

    stat_log = []
    if os.path.exists(os.path.join(datadir_path, 'stats')):
        try:
            with open(os.path.join(datadir_path, 'stats'), 'rb') as f:
                stat_log = json.loads(f.read())
        except:
            log.err(None, 'Error loading stats:')

    def update_stat_log():
        while stat_log and stat_log[0]['time'] < time.time() - 24 * 60 * 60:
            stat_log.pop(0)

        lookbehind = 3600 // node.net.SHARE_PERIOD
        if node.tracker.get_height(node.best_share_var.value) < lookbehind:
            return None

        global_stale_prop = p2pool_data.get_average_stale_prop(
            node.tracker, node.best_share_var.value, lookbehind)
        (stale_orphan_shares,
         stale_doa_shares), shares, _ = wb.get_stale_counts()
        miner_hash_rates, miner_dead_hash_rates = wb.get_local_rates()
        my_current_payout = 0.0
        for add in wb.pubkeys.keys:
            my_current_payout += node.get_current_txouts().get(
                dash_data.pubkey_hash_to_script2(add), 0) * 1e-8

        stat_log.append(
            dict(
                time=time.time(),
                pool_hash_rate=p2pool_data.get_pool_attempts_per_second(
                    node.tracker, node.best_share_var.value, lookbehind) /
                (1 - global_stale_prop),
                pool_stale_prop=global_stale_prop,
                local_hash_rates=miner_hash_rates,
                local_dead_hash_rates=miner_dead_hash_rates,
                shares=shares,
                stale_shares=stale_orphan_shares + stale_doa_shares,
                stale_shares_breakdown=dict(orphan=stale_orphan_shares,
                                            doa=stale_doa_shares),
                current_payout=my_current_payout,
                peers=dict(
                    incoming=sum(1
                                 for peer in node.p2p_node.peers.itervalues()
                                 if peer.incoming),
                    outgoing=sum(1
                                 for peer in node.p2p_node.peers.itervalues()
                                 if not peer.incoming),
                ),
                attempts_to_share=dash_data.target_to_average_attempts(
                    node.tracker.items[node.best_share_var.value].max_target),
                attempts_to_block=dash_data.target_to_average_attempts(
                    node.dashd_work.value['bits'].target),
                block_value=node.dashd_work.value['subsidy'] * 1e-8,
            ))

        with open(os.path.join(datadir_path, 'stats'), 'wb') as f:
            f.write(json.dumps(stat_log))

    x = deferral.RobustLoopingCall(update_stat_log)
    x.start(5 * 60)
    stop_event.watch(x.stop)
    new_root.putChild('log', WebInterface(lambda: stat_log))

    def get_share(share_hash_str):
        if int(share_hash_str, 16) not in node.tracker.items:
            return None
        share = node.tracker.items[int(share_hash_str, 16)]

        return dict(
            parent='%064x' % share.previous_hash,
            far_parent='%064x' % share.share_info['far_share_hash'],
            children=[
                '%064x' % x for x in sorted(
                    node.tracker.reverse.get(share.hash, set()),
                    key=lambda sh: -len(node.tracker.reverse.get(sh, set())))
            ],  # sorted from most children to least children
            type_name=type(share).__name__,
            local=dict(
                verified=share.hash in node.tracker.verified.items,
                time_first_seen=start_time
                if share.time_seen == 0 else share.time_seen,
                peer_first_received_from=share.peer_addr,
            ),
            share_data=dict(
                timestamp=share.timestamp,
                target=share.target,
                max_target=share.max_target,
                payout_address=dash_data.script2_to_address(
                    share.new_script, node.net.PARENT),
                donation=share.share_data['donation'] / 65535,
                stale_info=share.share_data['stale_info'],
                nonce=share.share_data['nonce'],
                desired_version=share.share_data['desired_version'],
                absheight=share.absheight,
                abswork=share.abswork,
            ),
            block=dict(
                hash='%064x' % share.header_hash,
                header=dict(
                    version=share.header['version'],
                    previous_block='%064x' % share.header['previous_block'],
                    merkle_root='%064x' % share.header['merkle_root'],
                    timestamp=share.header['timestamp'],
                    target=share.header['bits'].target,
                    nonce=share.header['nonce'],
                ),
                gentx=dict(
                    hash='%064x' % share.gentx_hash,
                    coinbase=share.share_data['coinbase'].ljust(
                        2, '\x00').encode('hex'),
                    value=share.share_data['subsidy'] * 1e-8,
                    last_txout_nonce='%016x' %
                    share.contents['last_txout_nonce'],
                ),
                other_transaction_hashes=[
                    '%064x' % x
                    for x in share.get_other_tx_hashes(node.tracker)
                ],
            ),
        )

    def get_share_address(share_hash_str):
        if int(share_hash_str, 16) not in node.tracker.items:
            return None
        share = node.tracker.items[int(share_hash_str, 16)]
        return dash_data.script2_to_address(share.new_script, node.net.PARENT)

    new_root.putChild(
        'payout_address',
        WebInterface(lambda share_hash_str: get_share_address(share_hash_str)))
    new_root.putChild(
        'share',
        WebInterface(lambda share_hash_str: get_share(share_hash_str)))
    new_root.putChild(
        'heads',
        WebInterface(lambda: ['%064x' % x for x in node.tracker.heads]))
    new_root.putChild(
        'verified_heads',
        WebInterface(
            lambda: ['%064x' % x for x in node.tracker.verified.heads]))
    new_root.putChild(
        'tails',
        WebInterface(lambda: [
            '%064x' % x for t in node.tracker.tails
            for x in node.tracker.reverse.get(t, set())
        ]))
    new_root.putChild(
        'verified_tails',
        WebInterface(lambda: [
            '%064x' % x for t in node.tracker.verified.tails
            for x in node.tracker.verified.reverse.get(t, set())
        ]))
    new_root.putChild(
        'best_share_hash',
        WebInterface(lambda: '%064x' % node.best_share_var.value
                     if node.best_share_var.value is not None else 0))
    new_root.putChild(
        'my_share_hashes',
        WebInterface(
            lambda:
            ['%064x' % my_share_hash for my_share_hash in wb.my_share_hashes]))

    def get_share_data(share_hash_str):
        if int(share_hash_str, 16) not in node.tracker.items:
            return ''
        share = node.tracker.items[int(share_hash_str, 16)]
        return p2pool_data.share_type.pack(share.as_share())

    new_root.putChild(
        'share_data',
        WebInterface(lambda share_hash_str: get_share_data(share_hash_str),
                     'application/octet-stream'))
    new_root.putChild(
        'currency_info',
        WebInterface(lambda: dict(
            symbol=node.net.PARENT.SYMBOL,
            block_explorer_url_prefix=node.net.PARENT.
            BLOCK_EXPLORER_URL_PREFIX,
            address_explorer_url_prefix=node.net.PARENT.
            ADDRESS_EXPLORER_URL_PREFIX,
            tx_explorer_url_prefix=node.net.PARENT.TX_EXPLORER_URL_PREFIX,
        )))
    new_root.putChild('version', WebInterface(lambda: p2pool.__version__))

    hd_path = os.path.join(datadir_path, 'graph_db')
    hd_data = _atomic_read(hd_path)
    hd_obj = {}
    if hd_data is not None:
        try:
            hd_obj = json.loads(hd_data)
        except Exception:
            log.err(None, 'Error reading graph database:')
    dataview_descriptions = {
        'last_hour': graph.DataViewDescription(150, 60 * 60),
        'last_day': graph.DataViewDescription(300, 60 * 60 * 24),
        'last_week': graph.DataViewDescription(300, 60 * 60 * 24 * 7),
        'last_month': graph.DataViewDescription(300, 60 * 60 * 24 * 30),
        'last_year': graph.DataViewDescription(300, 60 * 60 * 24 * 365.25),
    }
    hd = graph.HistoryDatabase.from_obj(
        {
            'local_hash_rate':
            graph.DataStreamDescription(dataview_descriptions, is_gauge=False),
            'local_dead_hash_rate':
            graph.DataStreamDescription(dataview_descriptions, is_gauge=False),
            'local_share_hash_rates':
            graph.DataStreamDescription(
                dataview_descriptions,
                is_gauge=False,
                multivalues=True,
                multivalue_undefined_means_0=True,
                default_func=graph.make_multivalue_migrator(
                    dict(good='local_share_hash_rate',
                         dead='local_dead_share_hash_rate',
                         orphan='local_orphan_share_hash_rate'),
                    post_func=lambda bins: [
                        dict((k, (v[0] - (sum(
                            bin.get(rem_k, (0, 0))[0]
                            for rem_k in ['dead', 'orphan'])
                                          if k == 'good' else 0), v[1]))
                             for k, v in bin.iteritems()) for bin in bins
                    ])),
            'pool_rates':
            graph.DataStreamDescription(dataview_descriptions,
                                        multivalues=True,
                                        multivalue_undefined_means_0=True),
            'current_payout':
            graph.DataStreamDescription(dataview_descriptions),
            'current_payouts':
            graph.DataStreamDescription(dataview_descriptions,
                                        multivalues=True),
            'peers':
            graph.DataStreamDescription(
                dataview_descriptions,
                multivalues=True,
                default_func=graph.make_multivalue_migrator(
                    dict(incoming='incoming_peers',
                         outgoing='outgoing_peers'))),
            'miner_hash_rates':
            graph.DataStreamDescription(
                dataview_descriptions, is_gauge=False, multivalues=True),
            'miner_dead_hash_rates':
            graph.DataStreamDescription(
                dataview_descriptions, is_gauge=False, multivalues=True),
            'desired_version_rates':
            graph.DataStreamDescription(dataview_descriptions,
                                        multivalues=True,
                                        multivalue_undefined_means_0=True),
            'traffic_rate':
            graph.DataStreamDescription(
                dataview_descriptions, is_gauge=False, multivalues=True),
            'getwork_latency':
            graph.DataStreamDescription(dataview_descriptions),
            'memory_usage':
            graph.DataStreamDescription(dataview_descriptions),
        }, hd_obj)
    x = deferral.RobustLoopingCall(
        lambda: _atomic_write(hd_path, json.dumps(hd.to_obj())))
    x.start(100)
    stop_event.watch(x.stop)

    @wb.pseudoshare_received.watch
    def _(work, dead, user):
        t = time.time()
        hd.datastreams['local_hash_rate'].add_datum(t, work)
        if dead:
            hd.datastreams['local_dead_hash_rate'].add_datum(t, work)
        if user is not None:
            hd.datastreams['miner_hash_rates'].add_datum(t, {user: work})
            if dead:
                hd.datastreams['miner_dead_hash_rates'].add_datum(
                    t, {user: work})

    @wb.share_received.watch
    def _(work, dead, share_hash):
        t = time.time()
        if not dead:
            hd.datastreams['local_share_hash_rates'].add_datum(
                t, dict(good=work))
        else:
            hd.datastreams['local_share_hash_rates'].add_datum(
                t, dict(dead=work))

        def later():
            res = node.tracker.is_child_of(share_hash,
                                           node.best_share_var.value)
            if res is None:
                res = False  # share isn't connected to sharechain? assume orphaned
            if res and dead:  # share was DOA, but is now in sharechain
                # move from dead to good
                hd.datastreams['local_share_hash_rates'].add_datum(
                    t, dict(dead=-work, good=work))
            elif not res and not dead:  # share wasn't DOA, and isn't in sharechain
                # move from good to orphan
                hd.datastreams['local_share_hash_rates'].add_datum(
                    t, dict(good=-work, orphan=work))

        reactor.callLater(200, later)

    @node.p2p_node.traffic_happened.watch
    def _(name, bytes):
        hd.datastreams['traffic_rate'].add_datum(time.time(), {name: bytes})

    def add_point():
        if node.tracker.get_height(node.best_share_var.value) < 10:
            return None
        lookbehind = min(node.net.CHAIN_LENGTH,
                         60 * 60 // node.net.SHARE_PERIOD,
                         node.tracker.get_height(node.best_share_var.value))
        t = time.time()

        pool_rates = p2pool_data.get_stale_counts(node.tracker,
                                                  node.best_share_var.value,
                                                  lookbehind,
                                                  rates=True)
        pool_total = sum(pool_rates.itervalues())
        hd.datastreams['pool_rates'].add_datum(t, pool_rates)

        current_txouts = node.get_current_txouts()
        my_current_payouts = 0.0
        for add in wb.pubkeys.keys:
            my_current_payouts += current_txouts.get(
                dash_data.pubkey_hash_to_script2(add), 0) * 1e-8
        hd.datastreams['current_payout'].add_datum(t, my_current_payouts)
        miner_hash_rates, miner_dead_hash_rates = wb.get_local_rates()
        current_txouts_by_address = dict(
            (dash_data.script2_to_address(script, node.net.PARENT), amount)
            for script, amount in current_txouts.iteritems())
        hd.datastreams['current_payouts'].add_datum(
            t,
            dict((user, current_txouts_by_address[user] * 1e-8)
                 for user in miner_hash_rates
                 if user in current_txouts_by_address))

        hd.datastreams['peers'].add_datum(
            t,
            dict(
                incoming=sum(1 for peer in node.p2p_node.peers.itervalues()
                             if peer.incoming),
                outgoing=sum(1 for peer in node.p2p_node.peers.itervalues()
                             if not peer.incoming),
            ))

        vs = p2pool_data.get_desired_version_counts(node.tracker,
                                                    node.best_share_var.value,
                                                    lookbehind)
        vs_total = sum(vs.itervalues())
        hd.datastreams['desired_version_rates'].add_datum(
            t,
            dict((str(k), v / vs_total * pool_total)
                 for k, v in vs.iteritems()))
        try:
            hd.datastreams['memory_usage'].add_datum(t, memory.resident())
        except:
            if p2pool.DEBUG:
                traceback.print_exc()

    x = deferral.RobustLoopingCall(add_point)
    x.start(5)
    stop_event.watch(x.stop)

    @node.dashd_work.changed.watch
    def _(new_work):
        hd.datastreams['getwork_latency'].add_datum(time.time(),
                                                    new_work['latency'])

    new_root.putChild(
        'graph_data',
        WebInterface(lambda source, view: hd.datastreams[source].dataviews[
            view].get_data(time.time())))

    if static_dir is None:
        static_dir = os.path.join(
            os.path.dirname(os.path.abspath(sys.argv[0])), 'web-static')
    web_root.putChild('static', static.File(static_dir))

    return web_root
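
A hedged sketch of serving the tree built above; wb, datadir_path and dashd_getnetworkinfo_var are the function's own parameters, while the port number is purely illustrative:

from twisted.internet import reactor
from twisted.web import server

web_root = get_web_root(wb, datadir_path, dashd_getnetworkinfo_var)
reactor.listenTCP(9171, server.Site(web_root))  # 9171 is illustrative
reactor.run()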
Example n. 22
    def static(self, request):
        return static.File(config.web_ui_directory)
Example n. 23
def main(argv):
    usage = "usage: %prog [options]"
    parser = OptionParser(usage=usage)
    parser.add_option("-f",
                      "--force",
                      action="store_true",
                      dest="force",
                      help="Overwrite existing Sensei node info")
    parser.add_option("-c",
                      "--reset",
                      action="store_true",
                      dest="reset",
                      help="Remove all registered nodes and then exit")
    parser.add_option("-i",
                      "--init",
                      action="store_true",
                      dest="init",
                      help="Initialize nodes")
    parser.add_option("-v",
                      "--verbose",
                      action="store_true",
                      dest="verbose",
                      help="Verbose mode")
    (options, args) = parser.parse_args()

    logging.basicConfig(format='[%(asctime)s] %(levelname)-8s"%(message)s"',
                        datefmt='%Y-%m-%d %a %H:%M:%S')

    if options.verbose:
        logger.setLevel(logging.NOTSET)

    initialize()

    zookeeper.set_log_stream(open("/dev/null", "w"))
    global cluster_client
    cluster_client = SinClusterClient(settings.SIN_SERVICE_NAME,
                                      settings.ZOOKEEPER_URL,
                                      settings.ZOOKEEPER_TIMEOUT)
    cluster_client.logger.setLevel(logging.INFO)
    cluster_client.logger.addHandler(logging.StreamHandler())
    cluster_client.add_listener(SinClusterListener())

    if options.force or options.reset:
        cluster_client.reset()
        Node.objects.all().delete()
        logger.info("Removed all registered nodes from the system.")
        logger.info("You may want to shut down all the agents.")
        if options.reset:
            return

    for node in settings.SIN_NODES["nodes"]:
        n, created = Node.objects.get_or_create(id=node["node_id"],
                                                defaults={
                                                    "id": node["node_id"],
                                                    "host": node["host"],
                                                    "agent_port": node["port"],
                                                    "online": False,
                                                    "group": Group(pk=1),
                                                })
        if not created:
            if n.host != node["host"] or n.agent_port != node["port"]:
                n.host = node["host"]
                n.agent_port = node["port"]
                n.save()
                cluster_client.remove_node(node["node_id"])
        cluster_client.register_node(node["node_id"],
                                     node["host"],
                                     port=node["port"])

    if options.init:
        return

    # Reset online status.  Some node(s) might have gone offline while Sin
    # server was down, therefore the server did not get notified and still
    # keeps the old "online" status for the node(s).  If the node(s) are
    # still online, we will send the start-store commands to them anyway.
    # If a store is already running on a node, the start-store command
    # will simply become a no-op.
    Node.objects.filter(online=True).update(online=False)

    static_files = static.File(os.path.join(SIN_HOME, 'admin'))
    WSGI = wsgi.WSGIResource(reactor, pool, WSGIHandler())
    root = Root(WSGI)
    root.putChild('static', static_files)

    log.startLogging(sys.stdout)
    site = server.Site(root)
    reactor.listenTCP(settings.SIN_LISTEN, site)
    pool.start()

    def post_initialization():
        cluster_client.notify_all()

    reactor.callInThread(post_initialization)

    reactor.callInThread(monitoring)

    reactor.run(installSignalHandlers=False)
Example n. 24
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This shows an example of a bare-bones distributed web set up.  The "master" and
"slave" parts will usually be in different files -- they are here together only
for brevity of illustration.  In normal usage they would each run in a separate
process.

Usage:
    $ python silly-web.py

Then visit http://localhost:19988/.
"""

from twisted.internet import reactor
from twisted.spread import pb
from twisted.web import distrib, server, static

# The "master" server
site = server.Site(distrib.ResourceSubscription("unix", ".rp"))
reactor.listenTCP(19988, site)

# The "slave" server
fact = pb.PBServerFactory(
    distrib.ResourcePublisher(server.Site(static.File("static"))))

reactor.listenUNIX("./.rp", fact)
reactor.run()
Example n. 25
    protocol = WSProtocol

class PbConnection( pb.Referenceable ):
    def __init__( self, client, data ):
        controllers[ data['id'] ] = self
        self.controller = data
        self.client = client

    def remote_data( self, data ):
        for device in data['devices']:
            for prop in data['devices'][device]:
                self.controller['devices'][device][prop] = data['devices'][device][prop]


class PbServer( pb.Root ):

    def remote_connect( self, client, data ):
        pbc = PbConnection( client, data )
        print "pb client connected" 
        return pbc
        #d = json.loads( data )
        #client.callRemote( "cmd", { "cmd": "set", "state": 1 } )

siteRoot = static.File(conf['site']['root'])
reactor.listenTCP( conf['site']['port'], server.Site( siteRoot ) )
reactor.listenTCP( conf['pb']['port'], pb.PBServerFactory( PbServer() ) )
reactor.listenTCP( conf['ws']['port'],  WebSocketFactory( WSFactory() ) )
pingCall = task.LoopingCall( sendIds )
pingCall.start( 5 )
reactor.run()
Example n. 26
    def requestAvatar(self, avatarId, mind, *interfaces):
        if resource.IResource in interfaces:
            return (resource.IResource,
                    static.File(settings.config["web"]["configroot"]),
                    lambda: None)
        raise NotImplementedError()
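
A hedged sketch of plugging such a realm into twisted.web.guard; the realm class, the hard-coded path and the credentials are illustrative stand-ins, not taken from the source:

from zope.interface import implementer
from twisted.cred.checkers import InMemoryUsernamePasswordDatabaseDontUse
from twisted.cred.portal import IRealm, Portal
from twisted.web import resource, static
from twisted.web.guard import BasicCredentialFactory, HTTPAuthSessionWrapper

@implementer(IRealm)
class ConfigRealm(object):
    # Same requestAvatar pattern as above, with a fixed path instead of
    # settings.config so the sketch is self-contained.
    def requestAvatar(self, avatarId, mind, *interfaces):
        if resource.IResource in interfaces:
            return (resource.IResource, static.File('/srv/config'),
                    lambda: None)
        raise NotImplementedError()

portal = Portal(ConfigRealm(),
                [InMemoryUsernamePasswordDatabaseDontUse(admin='secret')])
protected = HTTPAuthSessionWrapper(portal, [BasicCredentialFactory('config')])
# 'protected' can now be mounted with putChild like any other resource.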
Example n. 27
def worker(options):
    """
   Start background worker process.
   """
    workerPid = os.getpid()

    payload = "*" * options.payload

    if options.resource == 'file':
        f = open('index.html', 'wb')
        f.write(payload)
        f.close()
        root = static.File('.')

    elif options.resource == 'data':
        root = static.Data(payload, 'text/html')

    elif options.resource == 'fixed':
        root = FixedResource(payload)

    else:
        raise Exception("logic error")

    if not options.silence:
        print "Worker started on PID %s using resource %s" % (workerPid, root)

    if not options.silence:
        site = CountingSite(root)
    else:
        site = Site(root)
    site.log = lambda _: None  # disable any logging

    ## The master already created the socket, just start listening and accepting
    ##
    port = reactor.adoptStreamPort(options.fd, AF_INET, site)

    if options.profile:
        statprof.start()

    if not options.silence:

        def stat():
            if options.profile:
                statprof.stop()

            output = StringIO.StringIO()
            output.write("-" * 80)
            output.write("\nWorker with PID %s processed %d requests\n" %
                         (workerPid, site.cnt))

            if options.profile:
                output.write("\n")
                #format = statprof.DisplayFormats.ByLine
                #format = statprof.DisplayFormats.ByMethod
                #statprof.display(output, format = format)
                statprof.display(output)

            output.write("-" * 80)
            output.write("\n")
            output.write("\n")

            sys.stdout.write(output.getvalue())

            if options.profile:
                statprof.reset()
                statprof.start()

            reactor.callLater(options.interval, stat)

        reactor.callLater(options.interval, stat)

    reactor.run()
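
The worker above adopts a socket created elsewhere: options.fd is the file descriptor of a pre-bound listening socket. A hedged sketch of the master side, which this example omits:

import socket

# Master process (illustrative): create and bind the shared socket once,
# then hand its fileno() to every spawned worker as options.fd.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(('', 8080))
s.listen(50)
# In the worker, reactor.adoptStreamPort(s.fileno(), AF_INET, site) then
# takes over accepting connections on the inherited descriptor.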
Example n. 28
    def loadChildren(self):
        for f in os.listdir(self.path):
            if os.path.isdir(os.path.join(self.path, f)):
                self.resource.putChild(
                    f, static.File(os.path.join(self.path, f)))
Example n. 29
    def run(self):
        modeStr = None
        if self._evaluationMode is not None:
            modeStr = DataModel.EEvaluationModeNames.name(self._evaluationMode)
        whitelistStr = "<None>"
        if self._eventTypeWhitelist is not None:
            whitelistStr = ", ".join(self._eventTypeWhitelist)
        blacklistStr = "<None>"
        if self._eventTypeBlacklist is not None:
            blacklistStr = ", ".join(self._eventTypeBlacklist)
        stationFilterStr = "<None>"
        if self._stationFilter is not None:
            stationFilterStr = self._stationFilter
        dataSelectFilterStr = "<None>"
        if self._dataSelectFilter is not None:
            dataSelectFilterStr = self._dataSelectFilter
        Logging.debug("\n" \
                       "configuration read:\n" \
                       "  serve\n" \
                       "    dataselect    : %s\n" \
                       "    event         : %s\n" \
                       "    station       : %s\n" \
                       "  listenAddress   : %s\n" \
                       "  port            : %i\n" \
                       "  connections     : %i\n" \
                       "  htpasswd        : %s\n" \
                       "  accessLog       : %s\n" \
                       "  queryObjects    : %i\n" \
                       "  realtimeGap     : %s\n" \
                       "  samples (M)     : %s\n" \
                       "  allowRestricted : %s\n" \
                       "  useArclinkAccess: %s\n" \
                       "  hideAuthor      : %s\n" \
                       "  evaluationMode  : %s\n" \
                       "  eventType\n" \
                       "    whitelist     : %s\n" \
                       "    blacklist     : %s\n" \
                       "  inventory filter\n" \
                       "    station       : %s\n" \
                       "    dataSelect    : %s\n" \
                       "    debug enabled : %s\n" \
                       "  trackdb\n" \
                       "    enabled       : %s\n" \
                       "    defaultUser   : %s\n" \
                       "  auth\n" \
                       "    enabled       : %s\n" \
                       "    gnupgHome     : %s\n" % (
                       self._serveDataSelect, self._serveEvent,
                       self._serveStation, self._listenAddress, self._port,
                       self._connections, self._htpasswd, self._accessLogFile,
                       self._queryObjects, self._realtimeGap, self._samplesM,
                       self._allowRestricted, self._useArclinkAccess,
                       self._hideAuthor, modeStr,
                       whitelistStr, blacklistStr, stationFilterStr,
                       dataSelectFilterStr, self._debugFilter,
                       self._trackdbEnabled, self._trackdbDefaultUser,
                       self._authEnabled, self._authGnupgHome))

        if not self._serveDataSelect and not self._serveEvent and \
           not self._serveStation:
            Logging.error("all services disabled through configuration")
            return False

        # access logger if requested
        if self._accessLogFile:
            self._accessLog = Log(self._accessLogFile)

        # load inventory needed by DataSelect and Station service
        stationInv = dataSelectInv = None
        if self._serveDataSelect or self._serveStation:
            retn = False
            stationInv = dataSelectInv = Inventory.Instance().inventory()
            Logging.info("inventory loaded")

            if self._serveDataSelect and self._serveStation:
                # clone inventory if station and dataSelect filter are distinct
                # else share inventory between both services
                if self._stationFilter != self._dataSelectFilter:
                    dataSelectInv = self._cloneInventory(stationInv)
                    retn = self._filterInventory(stationInv, self._stationFilter, "station") and \
                           self._filterInventory(dataSelectInv, self._dataSelectFilter, "dataSelect")
                else:
                    retn = self._filterInventory(stationInv,
                                                 self._stationFilter)
            elif self._serveStation:
                retn = self._filterInventory(stationInv, self._stationFilter)
            else:
                retn = self._filterInventory(dataSelectInv,
                                             self._dataSelectFilter)

            if not retn:
                return False

        if self._serveDataSelect and self._useArclinkAccess:
            self._access.initFromSC3Routing(self.query().loadRouting())

        DataModel.PublicObject.SetRegistrationEnabled(False)

        shareDir = os.path.join(Environment.Instance().shareDir(), 'fdsnws')

        # Overwrite/set mime type of *.wadl and *.xml documents. Instead of
        # using the official types defined in /etc/mime.types 'application/xml'
        # is used as enforced by the FDSNWS spec.
        static.File.contentTypes['.wadl'] = 'application/xml'
        static.File.contentTypes['.xml'] = 'application/xml'

        # create resource tree /fdsnws/...
        root = ListingResource()

        fileName = os.path.join(shareDir, 'favicon.ico')
        fileRes = static.File(fileName, 'image/x-icon')
        fileRes.childNotFound = NoResource()
        fileRes.isLeaf = True
        root.putChild('favicon.ico', fileRes)

        prefix = ListingResource()
        root.putChild('fdsnws', prefix)

        # right now service version is shared by all services
        serviceVersion = ServiceVersion()

        # dataselect
        if self._serveDataSelect:
            dataselect = ListingResource()
            prefix.putChild('dataselect', dataselect)
            dataselect1 = DirectoryResource(
                os.path.join(shareDir, 'dataselect.html'))
            dataselect.putChild('1', dataselect1)

            dataselect1.putChild(
                'query', FDSNDataSelect(dataSelectInv, self._recordBulkSize))
            msg = 'authorization for restricted time series data required'
            authSession = self._getAuthSessionWrapper(dataSelectInv, msg)
            dataselect1.putChild('queryauth', authSession)
            dataselect1.putChild('version', serviceVersion)
            fileRes = static.File(os.path.join(shareDir, 'dataselect.wadl'))
            fileRes.childNotFound = NoResource()
            dataselect1.putChild('application.wadl', fileRes)
            fileRes = static.File(
                os.path.join(shareDir, 'dataselect-builder.html'))
            fileRes.childNotFound = NoResource()
            dataselect1.putChild('builder', fileRes)

            if self._authEnabled:
                dataselect1.putChild(
                    'auth', AuthResource(self._authGnupgHome, self._userdb))

        # event
        if self._serveEvent:
            event = ListingResource()
            prefix.putChild('event', event)
            event1 = DirectoryResource(os.path.join(shareDir, 'event.html'))
            event.putChild('1', event1)

            event1.putChild(
                'query',
                FDSNEvent(self._hideAuthor, self._evaluationMode,
                          self._eventTypeWhitelist, self._eventTypeBlacklist))
            fileRes = static.File(os.path.join(shareDir, 'catalogs.xml'))
            fileRes.childNotFound = NoResource()
            event1.putChild('catalogs', fileRes)
            fileRes = static.File(os.path.join(shareDir, 'contributors.xml'))
            fileRes.childNotFound = NoResource()
            event1.putChild('contributors', fileRes)
            event1.putChild('version', serviceVersion)
            fileRes = static.File(os.path.join(shareDir, 'event.wadl'))
            fileRes.childNotFound = NoResource()
            event1.putChild('application.wadl', fileRes)
            fileRes = static.File(os.path.join(shareDir, 'event-builder.html'))
            fileRes.childNotFound = NoResource()
            event1.putChild('builder', fileRes)

        # station
        if self._serveStation:
            station = ListingResource()
            prefix.putChild('station', station)
            station1 = DirectoryResource(os.path.join(shareDir,
                                                      'station.html'))
            station.putChild('1', station1)

            station1.putChild(
                'query',
                FDSNStation(stationInv, self._allowRestricted,
                            self._queryObjects))
            station1.putChild('version', serviceVersion)
            fileRes = static.File(os.path.join(shareDir, 'station.wadl'))
            fileRes.childNotFound = NoResource()
            station1.putChild('application.wadl', fileRes)
            fileRes = static.File(
                os.path.join(shareDir, 'station-builder.html'))
            fileRes.childNotFound = NoResource()
            station1.putChild('builder', fileRes)

        # static files
        fileRes = static.File(os.path.join(shareDir, 'js'))
        fileRes.childNotFound = NoResource()
        fileRes.hideInListing = True
        prefix.putChild('js', fileRes)

        fileRes = static.File(os.path.join(shareDir, 'css'))
        fileRes.childNotFound = NoResource()
        fileRes.hideInListing = True
        prefix.putChild('css', fileRes)

        retn = False
        try:
            # start listen for incoming request
            reactor.listenTCP(self._port, Site(root), self._connections,
                              self._listenAddress)

            # start processing
            Logging.info("start listening")
            log.addObserver(logSC3)

            reactor.run()
            retn = True
        except Exception, e:
            Logging.error(str(e))
Example n. 30
    def add_script(self, script):
        """Adds a script to the page
        """

        self.putChild(script.prefix, static.File(script.path))
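
A hedged usage sketch; the Script value object and the page instance are inferred from the method body, not taken from the source:

class Script(object):
    # Hypothetical value object exposing the two attributes add_script reads.
    def __init__(self, prefix, path):
        self.prefix, self.path = prefix, path

# page is assumed to be an instance of the resource class defined above;
# the script is then served under /<prefix> by a static.File child.
page.add_script(Script(prefix='app.js', path='/srv/webapp/static/app.js'))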