# -*- Mode: Python -*- import os import sys import coro import caesure.proto from caesure.bitcoin import * import coro.asn1.python import coro.asn1.ber from coro.log import Facility LOG = Facility('db') # pub/sub for new blocks. class BlockBroker: def __init__(self): self.q = coro.fifo() self.subs = set() coro.spawn(self.fanout_thread) def fanout_thread(self): # Note: this thread is overkill, just have publish do the fanout. while 1: ob = self.q.pop() for sub in self.subs: sub.push(ob)
# -*- Mode: Python -*- import os import struct from pprint import pprint as pp import sys from caesure.script import pprint_script, OPCODES, parse_script, is_unspendable, VerifyError from caesure.block_db import BlockDB from caesure.bitcoin import * from caesure.txfaa import UTXO_Map, UTXO_Scan_Map import coro from coro.log import Facility LOG = Facility('ledger') class RecentBlocks: def __init__(self, ledger, db): self.db = db # we always begin with one tip. self.blocks = {ledger.block_name: ledger} # these will be recomputed upon the call to self.new_block() self.root = set([ledger]) self.leaves = set([ledger]) # we keep a horizon of this many blocks back from the tip. self.horizon = 20 self.highest = 0 def new_block(self, block, verify=False): from __main__ import G
# -*- Mode: Python -*- import coro import amqp_shrapnel from coro.log import Facility LOG = Facility('consumer') # how to run this test: # 1) run this script. it will print something like this: # $ /usr/local/bin/python test/t0.py # # 2) Now, in another window run either t1.py (uses amqplib) or t2.py (uses this library): # # $ python test/t2.py # published! # -1: Tue Jan 10 12:49:27 2012 Exiting... # # In the first window you should see the message show up. # set this to see AMQP protocol-level info. debug = False def t0(): c = amqp_shrapnel.client(('guest', 'guest'), '127.0.0.1', heartbeat=30) c.debug = debug c.go() ch = c.channel()
import http_date
import mimetypes
import os
import re
from coro import read_stream
import socket
import stat
import sys
import time
import zlib

from protocol import latch, http_file, header_set, HTTP_Upgrade

from coro.log import Facility

LOG = Facility('http')

W = sys.stderr.write

__version__ = b'0.1'

class request_stream:

    """Pull HTTP requests off a connection's buffered stream."""

    def __init__(self, conn, stream):
        # per-client timeout, taken from the owning server.
        self.timeout = conn.server.client_timeout
        self.conn = conn
        self.stream = stream

    def get_request(self):
        # read the next request line; empty result means the peer closed.
        request_line = self.stream.read_line()
        if not request_line:
            # [view truncated: remainder of get_request() continues beyond this chunk]
# -*- Mode: Python -*-

import re
import random
import struct
import time

import coro

from caesure.bitcoin import dhash, network
from caesure.proto import VERSION, pack_inv, unpack_version, unpack_reject

from coro.log import Facility

LOG = Facility('conn')

def make_nonce():
    """Return a random nonce for the version handshake.

    Fix: random.randint() is inclusive on *both* ends, so the previous
    upper bound of (1 << 64) could — rarely — return a 65-bit value that
    does not fit in an unsigned 64-bit field.  Use (1 << 64) - 1 so the
    result is always a valid u64.
    """
    return random.randint(0, (1 << 64) - 1)

# matches "[ipv6::addr]:port"
ipv6_server_re = re.compile('\[([A-Fa-f0-9:]+)\]:([0-9]+)')
# matches "a.b.c.d:port"
ipv4_server_re = re.compile('([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+):([0-9]+)')

def parse_addr_arg(addr):
    """Parse an "ip:port" server address, IPv4 or bracketed IPv6.

    Raises ValueError if <addr> matches neither form.
    """
    m = ipv4_server_re.match(addr)
    if not m:
        m = ipv6_server_re.match(addr)
        if not m:
            raise ValueError("bad server address: %r" % (addr, ))
    ip0, port0 = m.groups()
    # [view truncated: remainder of parse_addr_arg() continues beyond this chunk]
# -*- Mode: Python -*- """ Implements the AMQP protocol for Shrapnel. """ import struct import coro import spec import rpc import sys from coro.log import Facility LOG = Facility ('amqp') from pprint import pprint as pp is_a = isinstance W = sys.stderr.write class AMQPError (Exception): pass class ProtocolError (AMQPError): pass class UnexpectedClose (AMQPError): pass class AuthenticationError (AMQPError): pass def dump_ob (ob):
# -*- Mode: Python -*- import os import pickle import random import coro from coro.log import Facility LOG = Facility('addrcache') # May 2014 fetched from https://github.com/bitcoin/bitcoin/blob/master/src/chainparams.cpp dns_seeds = [ "seed.bitcoin.sipa.be", "dnsseed.bluematt.me", # down? #"dnsseed.bitcoin.dashjr.org", "seed.bitcoinstats.com", "seed.bitnodes.io", "bitseed.xf2.org", ] # really need that pattern match compiler! # ipv4 not routable: # [10, ...] # [192, 168, ...] # [172, 16, ...] # [169, 254, ...] # ipv6 not routable # [0xfc, ...] # unique local # [0xfd, ...] # unique local
# -*- Mode: Python -*- import struct import coro import sys from coro.http import connection, tlslite_server, openssl_server, http_request from coro.http.protocol import header_set, http_file from coro.http.zspdy import inflator, deflator, unpack_control_frame, pack_control_frame from coro.http.zspdy import pack_data_frame, pack_http_header, unpack_http_header W = coro.write_stderr from coro.log import Facility LOG = Facility('spdy') # tricky bits: # # It's important to use one zlib compression object per connection, # the protocol assumes it and won't work if you try to create a # new context per request/stream # # The protocol looks like it supports a generic 'stream' facility, but it does not. # Each 'stream' is really a single request/reply, and the HTTP headers are part of # SYN_STREAM/SYN_REPLY. In other words, SPDY is very HTTP-centric. # When a reply is large (say >1MB) we still get a form of head-blocking behavior # unless we chop it up into bits. Think about an architecture that would # automatically do that. [i.e., a configurable max size for data frames]
import struct
import sys

from pprint import pprint as pp

from caesure import block_db
from caesure import ledger
from caesure import proto
from caesure import script
from caesure.bitcoin import *
from caesure.asn1_log import ASN1_Logger
from caesure.connection import BaseConnection, parse_addr_arg
from caesure.addrcache import AddressCache

from coro.log import Facility

LOG = Facility('server')

# shorthand for the tsc-ticks -> seconds conversion helper.
ticks_to_sec = coro.tsc_time.ticks_to_sec

def secs_since(t0):
    # seconds elapsed since tsc timestamp <t0>.
    return float(coro.now - t0) / coro.ticks_per_sec

def get_random_connection():
    "get a random live connection"
    # 'live' here means: we heard a packet from it within the last 30s.
    conns = []
    for addr, c in G.connection_map.iteritems():
        if secs_since(c.last_packet) < 30:
            conns.append(c)
    if len(conns):
        # [view truncated: remainder of get_random_connection() continues beyond this chunk]
import coro
import mimetypes
import os
import re
import stat
import sys
import time
import zlib

from coro.http.http_date import build_http_date, parse_http_date
from coro.log import Facility
from urllib import unquote

LOG = Facility('http handlers')

# these two aren't real handlers, they're more like templates
# to give you an idea how to write one.

class post_handler:

    def match(self, request):
        # override to do a better job of matching
        return request._method == 'post'

    def handle_request(self, request):
        # drain the request body, then mark the request finished.
        data = request.file.read()
        request.done()

class put_handler:
    # [view truncated: class body continues beyond this chunk]
Python prompt in a process.  Simply spawn a thread running the `serve`
function to start a backdoor server.
"""

VERSION_STRING = '$Id: //prod/main/ap/shrapnel/coro/backdoor.py#6 $'

import coro
import cStringIO
import fcntl
import sys
import traceback
import os

from coro.log import Facility

LOG = Facility('backdoor')

# Originally, this object implemented the file-output api, and set
# sys.stdout and sys.stderr to 'self'. However, if any other
# coroutine ran, it would see the captured definition of sys.stdout,
# and would send its output here, instead of the expected place. Now
# the code captures all output using StringIO. A little less
# flexible, a little less efficient, but much less surprising!
# [Note: this is exactly the same problem addressed by Scheme's
# dynamic-wind facility]

class backdoor:

    def __init__(self, sock, line_separator='\r\n',
:Variables:

    - `dns_request`: counter. Incremented for each top-level query
      (not incremented for additional queries required for recursive
      queries).
    - `net_request`: counter. A query is being sent over the network.
    - `cache_hit`: counter. A value was successfully retrieved from the
      cache. This does not include expired entries.
    - `cache_miss`: counter. A value was not found in the cache. This
      does not include expired entries.
    - `cache_exception`: counter. A "negative" cache entry was hit.
    - `cache_expired`: counter. A value was found in the cache, but has
      expired.
"""

from coro.log import Facility

LOG = Facility ('dns')

# Two kinds of negatively-cached data
CACHE_NXDOMAIN = "NXDOMAIN"
CACHE_NODATA = "NODATA"

# how long (in seconds) to cache non-rfc2308 negative responses
DEFAULT_NEGATIVE_TTL = 0

# limit on the amount of work we will do for any one query
RUNAWAY = 40

# limit on the amount of gluelessness we will take for any one query
GLUELESSNESS = 3

# minimum TTL, in seconds — presumably a floor applied to cached
# records; confirm at the (off-view) use site.
DEFAULT_MIN_TTL = 1800
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import coro from coro.asn1.ber import * from coro.ldap.query import * import re from coro.log import Facility LOG = Facility('ldap') # these should be exported by ber.pyx FLAGS_STRUCTURED = 0x20 FLAGS_APPLICATION = 0x40 FLAGS_CONTEXT = 0x80 W = coro.write_stderr re_dn = re.compile(r'\s*([,=])\s*') re_dn_attr = re.compile(r'^([^,]+)(=[^,]+)(,.*)?$') class ProtocolError(Exception): """An LDAP Protocol Error occurred""" pass
import coro
import errno
import os
import socket
import struct
import sys

# for coro.ssl.Error. I would like a better way to do this.
import coro.ssl

# for encoding names, args & results.
from coro.asn1.python import encode, decode

from coro.log import Facility

LOG = Facility('rpc')

class ReadyQueue:

    "queue that blocks on pop() but not on push()"

    def __init__(self):
        # buffered items, oldest first.
        self.q = []
        # condition variable used to hand items to waiting poppers.
        self.cv = coro.condition_variable()

    def __len__(self):
        # number of buffered (not yet popped) items.
        return len(self.q)

    def push(self, item):
        # wake_one() presumably delivers <item> directly to a waiting
        # popper; when no one is waiting it returns falsy and we fall
        # through — confirm against the (off-view) remainder.
        if not self.cv.wake_one(item):
            # [view truncated: remainder of push() continues beyond this chunk]