def _broadcast_config(self):
    # Read the rest of the config
    with database.transaction() as t:
        t.execute("SELECT name, type, maxsize, hashsize FROM fwup_sets")
        self.__sets = dict(map(lambda (name, tp, maxsize, hashsize): (name, (tp, maxsize, hashsize)), t.fetchall()))
    self.__config_message = self.__build_config()
    self.broadcast(self.__config_message)
def removeTx(self, nettype, txid):
    conn = network_conn(nettype)
    tx = get_tx(conn, txid)
    if not tx:
        raise ttypes.NotFound()
    with database.transaction(conn) as conn:
        return remove_tx(conn, tx)
def __keep_storing():
    """
    Run in a separate thread. It keeps getting stuff from the queue and
    pushing it to the database. This effectively makes waiting for the
    database commit asynchronous.
    """
    global __condition
    global __queue
    logger.info('Activity thread started')
    run = True
    while run:
        actions = None
        with __condition:
            while not __queue:
                __condition.wait()
            actions = __queue
            __queue = []
        try:
            with database.transaction() as t:
                for action in actions:
                    if not action(t):
                        run = False
        except Exception as e:
            logger.error("Unexpected exception in activity thread, ignoring: %s", e)
    logger.info('Activity thread terminated')
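The worker above assumes a producer that appends callables under the same condition lock and then wakes the thread. A minimal sketch of that producer side, assuming the same `__condition`/`__queue` module globals; the `push` name is hypothetical and the real module may expose a different entry point.

# Hypothetical producer for the activity queue consumed by __keep_storing.
# Each action is a callable taking the transaction cursor; returning a falsy
# value tells the worker to shut down.
def push(action):
    global __queue
    with __condition:          # same threading.Condition the worker waits on
        __queue.append(action)
        __condition.notify()   # wake __keep_storing if it is blocked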
def __init__(self, message, hosts):
    Task.__init__(self)
    self.__message = message
    self.__hosts = hosts
    with database.transaction() as t:
        t.execute("SELECT CURRENT_TIMESTAMP AT TIME ZONE 'UTC'")
        (self.__batch_time,) = t.fetchone()
def update_bills(source, files=None):
    if source == 'redis':
        queue = RedisQueue(REDIS_KEYS['insert_bills_db'], **REDIS_SETTINGS)
        files = (bill_filepath(bill_id) for bill_id in queue)
    elif source.startswith('db'):
        with transaction() as session:
            assembly_id = session.query(Election)\
                    .order_by(Election.age.desc())\
                    .first().age
            # FIXME: filter finished bills out
            query = session.query(Bill.id).filter_by(age=assembly_id)
            # ranged query, e.g. db[10:20]; apply offset/limit on the query
            # object before iterating (a generator has no .offset/.limit)
            m = re.match(r'db\[(\d*):(\d*)\]', source)
            if m:
                start, end = m.group(1), m.group(2)
                offset = int(start) if start else 0
                limit = int(end) - offset if end else None
                if offset:
                    query = query.offset(offset)
                if limit:
                    query = query.limit(limit)
            bill_ids = (record[0] for record in query)
            files = (bill_filepath(bill_id) for bill_id in bill_ids)
    elif files:
        files = [f for path in files for f in glob(path)]
    update_bills_from_files(files)
def linkBlock(self, nettype, blockhash, txids):
    conn = network_conn(nettype)
    with database.transaction(conn, isolation='serializable') as conn:
        block = get_block(conn, blockhash)
        if not block:
            raise ttypes.NotFound()
        link_txes(conn, block, txids)
def insert_bill_keywords(files):
    with transaction() as session:
        keyword_store = KeywordStore(session)
        for file_ in glob(files):
            filename = basename(file_)
            print 'processing %s' % filename
            sys.stdout.flush()
            bill_id = filename.split('.', 1)[0]
            with open(file_, 'r') as f:
                keywords = extract_keywords(f)
            keyword_ids = [keyword_store.id(keyword[0]) for keyword in keywords]
            keyword_store.sync()
            existing_keywords_for_bill = set(bk.keyword_id for bk in
                    session.query(bill_keyword)\
                           .filter(bill_keyword.c.bill_id == bill_id))
            new_bill_keywords = [{
                'bill_id': bill_id,
                'keyword_id': keyword_id,
                'weight': weight,
            } for (_, weight), keyword_id in izip(keywords, keyword_ids)
              if keyword_id not in existing_keywords_for_bill]
            if new_bill_keywords:
                session.execute(bill_keyword.insert(), new_bill_keywords)
def store_counts(data, stats, now):
    logger.info('Storing count snapshot')
    with database.transaction() as t:
        t.execute('SELECT name, id FROM count_types ORDER BY ord')
        name_data = t.fetchall()
        name_order = map(lambda x: x[0], name_data)
        names = dict(name_data)
        # It seems MySQL complains with insert ... select in some cases.
        # So we do some insert-select-insert magic here. That is probably
        # slower, but no idea how to help that. And it should work.
        t.execute('SELECT name, id FROM clients WHERE name IN (' + (','.join(['%s'] * len(data))) + ')', data.keys())
        clients = dict(t.fetchall())
        # Create a snapshot for each client
        t.executemany('INSERT INTO count_snapshots (timestamp, client) VALUES(%s, %s)', map(lambda client: (now, client), clients.values()))
        t.execute('SELECT client, id FROM count_snapshots WHERE timestamp = %s', (now,))
        snapshots = dict(t.fetchall())
        # Push all the data in
        def truncate(data, limit):
            if data > 2 ** limit - 1:
                logger.warn("Number %s overflow, truncating to %s", data, 2 ** limit - 1)
                return 2 ** limit - 1
            else:
                return data
        def clientdata(client):
            snapshot = snapshots[clients[client]]
            l = min(len(data[client]) / 2, len(name_order))
            return map(lambda name, index: (snapshot, names[name], truncate(data[client][index * 2], 63), truncate(data[client][index * 2 + 1], 63)), name_order[:l], range(0, l))
        def clientcaptures(client):
            snapshot = snapshots[clients[client]]
            return map(lambda i: (snapshot, i, truncate(stats[client][3 * i], 31), truncate(stats[client][3 * i + 1], 31), truncate(stats[client][3 * i + 2], 31)), range(0, len(stats[client]) / 3))
        def join_clients(c1, c2):
            c1.extend(c2)
            return c1
        t.executemany('INSERT INTO counts(snapshot, type, count, size) VALUES(%s, %s, %s, %s)', reduce(join_clients, map(clientdata, data.keys())))
        t.executemany('INSERT INTO capture_stats(snapshot, interface, captured, dropped, dropped_driver) VALUES(%s, %s, %s, %s, %s)', reduce(join_clients, map(clientcaptures, stats.keys())))
def insert_bills(files):
    with transaction() as session:
        bill_statuses.init(session)
        for file_ in glob(files):
            with open(file_, 'r') as f:
                record = json.load(f)
            insert_bill(session, record)
        bill_statuses.insert_all()
def update_people(filename):
    if not filename:
        return
    with transaction() as session:
        headers, records = read_csv(filename, headers=True)
        for record in records:
            update_person(session, headers, record)
def __reload_config(self):
    with database.transaction() as t:
        t.execute("SELECT name, value FROM config WHERE plugin = 'spoof'")
        config = dict(t.fetchall())
    self.__answer_timeout = int(config['answer_timeout'])
    self.__dest_addr = config['dest_addr']
    self.__src_addr = config['src_addr']
    self.__port = int(config['port'])
    self.__interval = config['interval']
def check_schedule(self):
    with database.transaction() as t:
        t.execute("SELECT m.m + i.i <= CURRENT_TIMESTAMP AT TIME ZONE 'UTC' FROM (SELECT COALESCE(MAX(batch), TO_TIMESTAMP(0)) AS m FROM nats) AS m CROSS JOIN (SELECT value::INTERVAL AS i FROM config WHERE plugin = 'sniff' AND name = 'nat-interval') AS i")
        (time_s_up,) = t.fetchone()
    if time_s_up:
        return [NatTask()]
    else:
        logger.debug('Not sniffing NAT yet')
        return []
def insert_candidacies(files, age, date):
    with transaction() as session:
        for file_ in glob(files):
            with open(file_, 'r') as f:
                list_ = json.load(f)
            for record in list_:
                person_id = insert_person(session, record)
                insert_party(session, record)
                insert_election(session, age, date)
                insert_candidacy(session, record, person_id, date)
def client_connected(self, client):
    name = client.cid()
    with database.transaction() as t:
        t.execute('SELECT groups.name FROM groups JOIN group_members ON groups.id = group_members.in_group JOIN clients ON group_members.client = clients.id WHERE clients.name = %s', (name,))
        groups = map(lambda g: g[0], t.fetchall())
    self.__clients[name] = buckets.client.Client(name, groups, lambda message: self.send(message, name))
    # Add the client to all the groups in each criterion
    for g in self.__clients[name].groups():
        self.__enter_group(name, g)
async def test_transaction_rollback_on_exception():
    await reset_db()
    assert await count(db, "statistics") == 0
    try:
        async with db.transaction() as tx:
            await tx.execute(
                "insert into statistics (statistics_id, content) values ('TEST_STATISTIC', '{}')")
            assert await count(tx, "statistics") == 1
            raise Exception("EXPECTED")
    except Exception as e:
        assert str(e) == "EXPECTED"
    assert await count(db, "statistics") == 0
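The test assumes a small `count` helper that works against both the raw `db` handle and an open transaction. A minimal sketch, assuming an asyncpg-style `fetchval`; the project's actual wrapper may differ.

# Hypothetical helper assumed by the tests above. Only ever called with
# trusted, hard-coded table names, since identifiers cannot be bound as
# query parameters.
async def count(conn, table):
    return await conn.fetchval("SELECT COUNT(*) FROM {0}".format(table))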
def add(user_name, comment):
    user_name = user_name.encode('latin1').decode('utf8')
    comment = comment.encode('latin1').decode('utf8')
    created_at = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    with transaction() as cursor:
        cursor.execute('''
            INSERT INTO `comments` (`user_name`, `body`, `created_at`)
            VALUES (?, ?, ?)
        ''', (user_name, comment, created_at))
def store_flows(max_records, client, message, expect_conf_id, now):
    (header, message) = (message[:12], message[12:])
    (conf_id, calib_time) = struct.unpack('!IQ', header)
    if conf_id != expect_conf_id:
        logger.warn('Flows of different config (%s vs. %s) received from client %s', conf_id, expect_conf_id, client)
    if not message:
        logger.warn('Empty list of flows from %s', client)
        return
    values = []
    count = 0
    while message:
        (flow, message) = (message[:61], message[61:])
        (flags, cin, cout, sin, sout, ploc, prem, tbin, tbout, tein, teout) = struct.unpack('!BIIQQHHQQQQ', flow)
        v6 = flags & 1
        udp = flags & 2
        in_started = not not (flags & 4)
        out_started = not not (flags & 8)
        in_ended = not not (flags & 16)
        out_ended = not not (flags & 32)
        rst_seen = not not (flags & 64)
        if v6:
            size = 16
            tp = socket.AF_INET6
        else:
            size = 4
            tp = socket.AF_INET
        (aloc, arem, message) = (message[:size], message[size:2 * size], message[2 * size:])
        (aloc, arem) = map(lambda addr: socket.inet_ntop(tp, addr), (aloc, arem))
        if udp:
            proto = 'U'
        else:
            proto = 'T'
        logger.trace("Flow times: %s, %s, %s, %s, %s (%s/%s packets)", calib_time, tbin, tbout, tein, teout, cin, cout)
        ok = True
        for v in (tbin, tein, tbout, teout):
            if v > 0 and calib_time - v > 86400000:
                logger.error("Time difference out of range for client %s: %s/%s", client, calib_time - v, v)
                ok = False
        if ok:
            values.append((aloc, arem, ploc, prem, proto, now, calib_time - tbin if tbin > 0 else None, now, calib_time - tbout if tbout > 0 else None, now, calib_time - tein if tein > 0 else None, now, calib_time - teout if teout > 0 else None, cin, cout, sin, sout, in_started, out_started, in_ended, out_ended, rst_seen, now, client))
            count += 1
        if count > max_records:
            logger.warn("Unexpectedly high number of flows in the message from client %s - %s connection, max expected %s. Ignoring.", client, count, max_records)
            return
    with database.transaction() as t:
        t.executemany("INSERT INTO biflows (client, ip_local, ip_remote, port_local, port_remote, proto, start_in, start_out, stop_in, stop_out, count_in, count_out, size_in, size_out, seen_start_in, seen_start_out, seen_end_in, seen_end_out, seen_rst, tagged_on) SELECT clients.id, %s, %s, %s, %s, %s, %s - %s * INTERVAL '1 millisecond', %s - %s * INTERVAL '1 millisecond', %s - %s * INTERVAL '1 millisecond', %s - %s * INTERVAL '1 millisecond', %s, %s, %s, %s, %s, %s, %s, %s, %s, %s FROM clients WHERE clients.name = %s", values)
    logger.debug("Stored %s flows for %s", count, client)
def update_bills_from_files(files):
    if not files:
        return
    with transaction() as session:
        bill_statuses.init(session)
        for f in files:
            try:
                if not isinstance(f, file):
                    f = open(f, 'r')
                with f:
                    record = json.load(f)
            except Exception as e:
                print >> sys.stderr, e
                continue
            insert_bill(session, record)
        bill_statuses.insert_all()
async def archive_channel(guild_id, channel_id):
    async with db.transaction() as tx:
        latest_id = await latest_message_id(tx, channel_id)
        all_messages = await fetch_messages_from(channel_id, latest_id)
        if len(all_messages) > 0:
            for message in all_messages:
                await upsert_message(tx, guild_id, message)
            new_latest_id = all_messages[0]["id"]
            await update_latest_message_id(tx, guild_id, channel_id, new_latest_id)
        stored_guild_id = await get_guild_id(tx, channel_id)
        if stored_guild_id is None:
            await set_guild_id(tx, channel_id, guild_id)
    log.info("Fetched total %d messages", len(all_messages))
def store_logs(message, client, now, version):
    values = []
    count = 0
    while message:
        if version <= 1:
            (age, type_idx, family_idx, info_count, code) = struct.unpack('!IBBBc', message[:8])
            rem_port = None
            message = message[8:]
        else:
            (age, type_idx, family_idx, info_count, code, rem_port) = struct.unpack('!IBBBcH', message[:10])
            message = message[10:]
        (name, passwd, reason) = (None, None, None)
        tp = types[type_idx]
        family = families[family_idx]
        rem_address = socket.inet_ntop(family['opt'], message[:family['len']])
        message = message[family['len']:]
        if version <= 1:
            loc_address = None
        else:
            loc_address = socket.inet_ntop(family['opt'], message[:family['len']])
            message = message[family['len']:]
        for i in range(0, info_count):
            (kind_i,) = struct.unpack('!B', message[0])
            (content, message) = protocol.extract_string(message[1:])
            # Twisted gives us the message as a string. The name and password
            # columns are bytea in postgres. This needs to be resolved by
            # a conversion wrapper (because python seems to use escaping, not
            # bound params)
            if kind_i == 0:
                name = psycopg2.Binary(content)
            elif kind_i == 1:
                passwd = psycopg2.Binary(content)
            elif kind_i == 2:
                reason = content
        values.append((now, age, tp, rem_address, loc_address, rem_port, name, passwd, reason, client, code))
        count += 1
    with database.transaction() as t:
        t.executemany("INSERT INTO fake_logs (client, timestamp, event, remote, local, remote_port, server, name, password, reason) SELECT clients.id, %s - %s * INTERVAL '1 millisecond', %s, %s, %s, %s, fake_server_names.type, %s, %s, %s FROM clients CROSS JOIN fake_server_names WHERE clients.name = %s AND fake_server_names.code = %s", values)
    logger.debug("Stored %s fake server log events for client %s", count, client)
async def upsert_users(users):
    if not all(is_full_user(user) for user in users):
        log.info("Not all users were full")
        return
    async with db.transaction() as tx:
        for user in users:
            log.info("Updating user {0}".format(user))
            await tx.execute(
                """
                INSERT INTO discord_user (user_id, name, raw)
                VALUES ($1, $2, $3)
                ON CONFLICT (user_id) DO UPDATE
                SET name = EXCLUDED.name, raw = EXCLUDED.raw
                """,
                user.get("id"), user.get("username"), json.dumps(user))
def store_connections(message, client, now):
    (basetime,) = struct.unpack('!Q', message[:8])
    message = message[8:]
    values = []
    count = 0
    while message:
        (time, reason, family, loc_port, rem_port) = struct.unpack('!QcBHH', message[:14])
        addr_len = 4 if family == 4 else 16
        address = message[14:14 + addr_len]
        address = socket.inet_ntop(socket.AF_INET if family == 4 else socket.AF_INET6, address)
        message = message[14 + addr_len:]
        if basetime - time > 86400000:
            logger.error("Refused time difference is out of range for client %s: %s", client, basetime - time)
            continue
        values.append((now, basetime - time, address, loc_port, rem_port, reason, client))
        count += 1
    with database.transaction() as t:
        t.executemany("INSERT INTO refused (client, timestamp, address, local_port, remote_port, reason) SELECT clients.id, %s - %s * INTERVAL '1 millisecond', %s, %s, %s, %s FROM clients WHERE clients.name = %s", values)
    logger.debug("Stored %s refused connections for client %s", count, client)
def __diff_update(self, name, full, epoch, from_version, to_version, prefix):
    key = (name, full, epoch, from_version, to_version)
    if key in self.__cache:
        # Someone already asked for this, just reuse the result instead of asking the DB
        return self.__cache[key]
    with database.transaction() as t:
        t.execute(self.__diff_query, (name, epoch, from_version, to_version, name, epoch))
        addresses = t.fetchall()
    params = [len(name), name, full, epoch]
    if not full:
        params.append(from_version)
    params.append(to_version)
    result = 'D' + prefix + struct.pack('!I' + str(len(name)) + 's?II' + ('' if full else 'I'), *params)
    for (address, add) in addresses:
        if not add and full:
            continue  # Don't mention deleted addresses on full update
        addr = addr_convert(address, self.__logger)
        self.__logger.trace("Addr: %s/%s", repr(addr), len(addr))
        result += struct.pack('!B', len(addr) + add) + addr
    self.__cache[key] = result
    return result
async def cmd_osu_add(client, message, arg):
    user = arg.strip()
    if not user:
        return
    log.info(f"Adding osu player '{user}' by user {message.author.id} ({message.author.name})")
    user_std = await api.user(user, Mode.Standard)
    user_taiko = await api.user(user, Mode.Taiko)
    user_catch = await api.user(user, Mode.Catch)
    user_mania = await api.user(user, Mode.Mania)
    if not user_std or not user_taiko or not user_catch or not user_mania:
        return await message.channel.send("User could not be found")
    try:
        async with db.transaction() as tx:
            await tx.execute(
                "INSERT INTO osuuser (osuuser_id, channel_id) VALUES ($1, $2)",
                user_std.id, str(message.channel.id))
            await tx.execute(
                "INSERT INTO osupp (osuuser_id, osugamemode_id, pp, rank, changed) VALUES ($1, 'STANDARD', $2, $3, current_timestamp)",
                user_std.id, user_std.pp, user_std.rank)
            await tx.execute(
                "INSERT INTO osupp (osuuser_id, osugamemode_id, pp, rank, changed) VALUES ($1, 'TAIKO', $2, $3, current_timestamp)",
                user_taiko.id, user_taiko.pp, user_taiko.rank)
            await tx.execute(
                "INSERT INTO osupp (osuuser_id, osugamemode_id, pp, rank, changed) VALUES ($1, 'CATCH', $2, $3, current_timestamp)",
                user_catch.id, user_catch.pp, user_catch.rank)
            await tx.execute(
                "INSERT INTO osupp (osuuser_id, osugamemode_id, pp, rank, changed) VALUES ($1, 'MANIA', $2, $3, current_timestamp)",
                user_mania.id, user_mania.pp, user_mania.rank)
    except UniqueViolationError:
        return await message.channel.send("User is already added")
def __check(self):
    """
    Check the DB to see if we should ask for another round of spoofed packets.
    """
    if self.__sent:
        return  # Still sending bursts
    self.__reload_config()
    with database.transaction() as t:
        t.execute("SELECT CURRENT_TIMESTAMP AT TIME ZONE 'UTC', COALESCE(MAX(batch) + INTERVAL %s < CURRENT_TIMESTAMP AT TIME ZONE 'UTC', TRUE) FROM spoof", (self.__interval,))
        (self.__now, run) = t.fetchone()
    if not run:
        logger.debug("Too early to ask for spoofed packets")
        return
    logger.info('Asking clients to send spoofed packets')
    self.__batch = set()
    self.__prefix = '4' + \
        socket.inet_pton(socket.AF_INET, socket.gethostbyname(self.__src_addr)) + \
        socket.inet_pton(socket.AF_INET, socket.gethostbyname(self.__dest_addr)) + \
        struct.pack("!H", self.__port)
    self.__sent = set()
    self.__do_send()
def store_certs(client, payload, hosts, batch_time, now):
    with database.transaction() as t:
        for (rid, want_details, want_params) in hosts:
            (count,) = struct.unpack("!B", payload[0])
            payload = payload[1:]
            if count > 0:
                cipher = None
                proto = None
                if want_params:
                    (cipher, payload) = extract_string(payload)
                    (proto, payload) = extract_string(payload)
                t.execute("INSERT INTO certs (request, client, batch, timestamp, proto, cipher) SELECT %s, clients.id, %s, %s, %s, %s FROM clients WHERE name = %s RETURNING id", (rid, batch_time, now, proto, cipher, client))
                (cert_id,) = t.fetchone()
                for i in range(0, count):
                    (cert, payload) = extract_string(payload)
                    if want_details:
                        (name, payload) = extract_string(payload)
                        (expiry, payload) = extract_string(payload)
                    else:
                        name = None
                        expiry = None
                    t.execute("INSERT INTO cert_chains (cert, ord, is_full, value, name, expiry) VALUES(%s, %s, %s, %s, %s, %s)", (cert_id, i, len(cert) > 40, cert, name, dateutil.parser.parse(expiry).isoformat() if expiry else None))
def __check_conf(self):
    self.__logger.trace("Checking %s configs", self.__plugname)
    with database.transaction() as t:
        t.execute("SELECT name, value FROM config WHERE plugin = '" + self.__plugname + "'")
        config = dict(t.fetchall())
        t.execute(self.__version_query)
        addresses = {}
        for (name, epoch, version) in t.fetchall():
            addresses[name] = (epoch, version)
    addresses_orig = self._addresses
    self._addresses = addresses
    if self._conf != config:
        self.__logger.info("Config changed, broadcasting")
        self._conf = config
        self.__cache = {}
        self._broadcast_config()
    if addresses_orig != addresses:
        self.__cache = {}
        for a in addresses:
            if addresses_orig.get(a) != addresses[a]:
                self.__logger.debug("Broadcasting new version of %s", a)
                self._broadcast_version(a, addresses[a][0], addresses[a][1])
def store_keys(groups, now):
    logger.info("Storing buckets")
    with database.transaction() as t:
        def aggregate(l1, l2):
            l1.extend(l2)
            return l1
        def cdata(crit):
            def gdata(gname):
                group = groups[crit][gname]
                (keys, count, strengths) = group.keys_extract()
                return map(lambda key: (crit, now, key, len(keys[key]), count, strengths[key], gname), keys.keys())
            return reduce(aggregate, map(gdata, groups[crit].keys()))
        data = reduce(aggregate, map(cdata, groups.keys()))
        t.executemany('INSERT INTO anomalies(from_group, type, timestamp, value, relevance_count, relevance_of, strength) SELECT groups.id, %s, %s, %s, %s, %s, %s FROM groups WHERE groups.name = %s', data)
def addTxList(self, nettype, txes, mempool):
    conn = network_conn(nettype)
    with database.transaction(conn) as conn:
        verified_txes = []
        for tx in txes:
            logging.info('saving tx %s', tx.hash.encode('hex'))
            if mempool:
                v, m = verify_tx_mempool(conn, tx)
            else:
                v, m = verify_tx_chain(conn, tx)
            if v:
                verified_txes.append(tx)
                save_tx(conn, tx)
            else:
                logging.warn('verify tx failed %s, message=%s', tx.hash.encode('hex'), m)
        # POST save methods
        for dtx in itercol(conn, conn.tx, 'update_addrs.tx._id', len(verified_txes)):
            update_addrs(conn, dtx)
        constructive_task(conn, len(verified_txes))
def check_schedule(self):
    now = int(time.time())
    if self.__task_interval + self.__last_task <= now:
        encoded = ''
        host_count = 0
        hosts = []
        with database.transaction() as t:
            t.execute("SELECT id, host, port, starttls, want_cert, want_chain, want_details, want_params FROM cert_requests WHERE active AND lastrun + interval < CURRENT_TIMESTAMP AT TIME ZONE 'UTC' ORDER BY lastrun + interval LIMIT %s", (self.__batchsize,))
            requests = t.fetchall()
            for request in requests:
                (rid, host, port, starttls, want_cert, want_chain, want_details, want_params) = request
                host_count += 1
                encoded += encode_host(host, port, starttls, want_cert, want_chain, want_details, want_params)
                hosts.append((rid, want_details, want_params))
                t.execute("UPDATE cert_requests SET lastrun = CURRENT_TIMESTAMP AT TIME ZONE 'UTC' WHERE id = %s", (rid,))
        self.__last_task = now
        if hosts:
            return [CertTask(struct.pack('!H', host_count) + encoded, hosts)]
        else:
            logger.debug('No hosts to ask for certificates yet')
            return []
    else:
        logger.debug('Not asking for certs yet')
        return []
def log_query(query):
    if not query:
        return
    log = QueryLog(unicode(query))
    with transaction() as session:
        session.add(log)
def submit_data(client, payload, batch_time):
    with database.transaction() as t:
        t.execute("INSERT INTO nats (batch, client, nat_v4, nat_v6) SELECT %s, clients.id, %s, %s FROM clients WHERE name = %s",
                  (batch_time, decode(payload[0]), decode(payload[1]), client))
import random
import struct
import logging
import time

import twisted.protocols.basic

from protocol import extract_string, format_string
import activity
import auth
import plugin_versions
import database
import timers

logger = logging.getLogger(name='client')

sysrand = random.SystemRandom()
challenge_len = 128  # 128 bits of random should be enough for log-in to protect against replay attacks

with database.transaction() as t:
    # As we just started, there's no plugin active anywhere.
    # Mark anything active as no longer active in the history and
    # flush the active ones.
    t.execute("INSERT INTO plugin_history (client, name, timestamp, active) SELECT client, name, CURRENT_TIMESTAMP AT TIME ZONE 'UTC', false FROM active_plugins")
    t.execute("DELETE FROM active_plugins")

class ClientConn(twisted.protocols.basic.Int32StringReceiver):
    MAX_LENGTH = 1024 ** 3  # A gigabyte should be enough
    """
    Connection from one client. It handles the low-level protocol,
    sorts the messages, answers pings, times out, etc. It also routes
    messages to other parts of the system.
    """

    def __init__(self, plugins, addr, fastpings):
def __init__(self):
    Task.__init__(self)
    with database.transaction() as t:
        t.execute("SELECT CURRENT_TIMESTAMP AT TIME ZONE 'UTC'")
        (self.__batch_time,) = t.fetchone()
def rewindTip(self, nettype, height):
    conn = network_conn(nettype)
    with database.transaction(conn) as conn:
        v, m = rewind_tip(conn, height)
        if not v:
            raise ttypes.AppException(code='rewind_failed', message=m)
async def reset_db():
    async with db.transaction() as tx:
        await clear_schema(tx, "public")
        await set_latest_migration_version(tx)
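A sketch of what the `clear_schema` helper assumed above might do: drop and recreate the schema so every test starts from an empty database. This is an assumption; the real helper may truncate tables or replay migrations instead.

# Hypothetical implementation of clear_schema. The schema name comes from
# trusted test code, never from user input.
async def clear_schema(tx, schema):
    await tx.execute("DROP SCHEMA IF EXISTS {0} CASCADE".format(schema))
    await tx.execute("CREATE SCHEMA {0}".format(schema))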
def store_bandwidth(data, now):
    logger.info('Storing bandwidth snapshot')
    with database.transaction() as t:
        for client, cldata in data.items():
            for window in cldata.windows.itervalues():
                t.execute("""INSERT INTO bandwidth (client, timestamp, win_len, in_max, out_max)
                    SELECT clients.id AS client, %s, %s, %s, %s
                    FROM clients
                    WHERE name = %s
                    """, (now, window.length, window.in_max, window.out_max, client))
        for client, cldata in data.items():
            if not cldata.buckets:
                continue
            # Choose data structures according to protocol version
            if cldata.version <= 2:
                BUCKET_MAP = BUCKET_MAP_PROTO2
                BUCKETS_CNT = BUCKETS_CNT_PROTO2
            else:
                BUCKET_MAP = BUCKET_MAP_PROTO3
                BUCKETS_CNT = BUCKETS_CNT_PROTO3
            ##### DBG #####
            in_time = [0] * BUCKETS_CNT
            in_bytes = [0] * BUCKETS_CNT
            out_time = [0] * BUCKETS_CNT
            out_bytes = [0] * BUCKETS_CNT
            for bucket in cldata.buckets.itervalues():
                pos = BUCKET_MAP[bucket.bucket]
                in_time[pos] = bucket.in_time
                in_bytes[pos] = bucket.in_bytes
                out_time[pos] = bucket.out_time
                out_bytes[pos] = bucket.out_bytes
            t.execute("""INSERT INTO bandwidth_stats_dbg (client, timestamp, timestamp_dbg, in_time, in_bytes, out_time, out_bytes)
                SELECT clients.id AS client, %s as timestamp, %s, %s, %s, %s, %s
                FROM clients
                WHERE name = %s
                """, (now, cldata.timestamp_dbg, in_time, in_bytes, out_time, out_bytes, client))
            ##### /DBG #####
            t.execute("""SELECT client, timestamp, in_time, in_bytes, out_time, out_bytes
                FROM bandwidth_stats
                JOIN clients ON bandwidth_stats.client = clients.id
                WHERE name = %s AND timestamp = date_trunc('hour', %s)
                """, (client, now))
            result = t.fetchone()
            if result is None:
                in_time = [0] * BUCKETS_CNT
                in_bytes = [0] * BUCKETS_CNT
                out_time = [0] * BUCKETS_CNT
                out_bytes = [0] * BUCKETS_CNT
                for bucket in cldata.buckets.itervalues():
                    pos = BUCKET_MAP[bucket.bucket]
                    in_time[pos] = bucket.in_time
                    in_bytes[pos] = bucket.in_bytes
                    out_time[pos] = bucket.out_time
                    out_bytes[pos] = bucket.out_bytes
                t.execute("""INSERT INTO bandwidth_stats (client, timestamp, in_time, in_bytes, out_time, out_bytes)
                    SELECT clients.id AS client, date_trunc('hour', %s) as timestamp, %s, %s, %s, %s
                    FROM clients
                    WHERE name = %s
                    """, (now, in_time, in_bytes, out_time, out_bytes, client))
            else:
                (client_id, timestamp, in_time, in_bytes, out_time, out_bytes) = result
                for bucket in cldata.buckets.itervalues():
                    pos = BUCKET_MAP[bucket.bucket]
                    in_time[pos] += bucket.in_time
                    in_bytes[pos] += bucket.in_bytes
                    out_time[pos] += bucket.out_time
                    out_bytes[pos] += bucket.out_bytes
                t.execute("""UPDATE bandwidth_stats
                    SET in_time = %s, in_bytes = %s, out_time = %s, out_bytes = %s
                    WHERE client = %s AND timestamp = %s
                    """, (in_time, in_bytes, out_time, out_bytes, client_id, timestamp))
def store_bandwidth(data, now):
    logger.info('Storing bandwidth snapshot')
    with database.transaction() as t:
        for client, cldata in data.items():
            for window in cldata.windows.itervalues():
                t.execute("""INSERT INTO bandwidth (client, timestamp, win_len, in_max, out_max)
                    SELECT clients.id AS client, %s, %s, %s, %s
                    FROM clients
                    WHERE name = %s
                    """, (now, window.length, window.in_max, window.out_max, client))
        for client, cldata in data.items():
            if not cldata.buckets:
                continue
            try:
                t.execute("""SELECT client, timestamp, in_time, in_bytes, out_time, out_bytes
                    FROM bandwidth_stats
                    JOIN clients ON bandwidth_stats.client = clients.id
                    WHERE name = %s AND timestamp = date_trunc('hour', %s)
                    """, (client, now))
                result = t.fetchone()
                # Use the current data or provide a blank new set.
                if result:
                    in_time = result[2]
                    in_bytes = result[3]
                    out_time = result[4]
                    out_bytes = result[5]
                else:
                    in_time = [0] * BUCKETS_CNT
                    in_bytes = [0] * BUCKETS_CNT
                    out_time = [0] * BUCKETS_CNT
                    out_bytes = [0] * BUCKETS_CNT
                # Update it with the data we just received
                for bucket in cldata.buckets.itervalues():
                    pos = BUCKET_MAP[bucket.bucket]
                    in_time[pos] += bucket.in_time
                    in_bytes[pos] += bucket.in_bytes
                    out_time[pos] += bucket.out_time
                    out_bytes[pos] += bucket.out_bytes
                # Store it (insert or update, depending on whether we had a previous value)
                if result:
                    client_id = result[0]
                    timestamp = result[1]
                    t.execute("""UPDATE bandwidth_stats
                        SET in_time = %s, in_bytes = %s, out_time = %s, out_bytes = %s
                        WHERE client = %s AND timestamp = %s
                        """, (in_time, in_bytes, out_time, out_bytes, client_id, timestamp))
                else:
                    t.execute("""INSERT INTO bandwidth_stats (client, timestamp, in_time, in_bytes, out_time, out_bytes)
                        SELECT clients.id AS client, date_trunc('hour', %s) as timestamp, %s, %s, %s, %s
                        FROM clients
                        WHERE name = %s
                        """, (now, in_time, in_bytes, out_time, out_bytes, client))
            except KeyError:
                # Some clients send invalid data (bucket with index 0). While we
                # need to solve that, we at least don't want to kill data for all
                # the clients in such a case.
                logger.exception("Broken data from client %s", client)
def addBlock(self, nettype, block, txids):
    conn = network_conn(nettype)
    with database.transaction(conn, isolation='serializable') as conn:
        add_block(conn, block, txids)
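All of the synchronous snippets above lean on a `database.transaction()` context manager that commits on a clean exit and rolls back when the block raises. A minimal sketch of such a wrapper, assuming a psycopg2-style connection; the real implementation (connection pooling, the `isolation='serializable'` option used above, retries) may differ.

# Minimal sketch only: assumes a DB-API connection with cursor(), commit()
# and rollback(); not the project's actual wrapper.
from contextlib import contextmanager

@contextmanager
def transaction(conn):
    cursor = conn.cursor()
    try:
        yield cursor       # the snippets above use this as "t"
        conn.commit()      # clean exit: make the work durable
    except Exception:
        conn.rollback()    # any failure inside the block undoes everything
        raise
    finally:
        cursor.close()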