def is_valid(self):
    """Validate this Proposal against Dash governance rules.

    Checks, in order: non-empty normalized name, sane epoch ordering,
    a positive numeric payment amount, a valid payment address for the
    configured network, a minimally well-formed whitespace-free URL,
    and a serialized size within Dash Core's limit.

    Returns:
        bool: True only if every check passes, False otherwise.
    """
    import dashlib

    printdbg("In Proposal#is_valid, for Proposal: %s" % self.__dict__)

    try:
        # proposal name exists and is not null/whitespace
        if (len(self.name.strip()) == 0):
            printdbg("\tInvalid Proposal name [%s], returning False" % self.name)
            return False

        # proposal name is normalized (something like "[a-zA-Z0-9-_]+")
        if not re.match(r'^[-_a-zA-Z0-9]+$', self.name):
            printdbg(
                "\tInvalid Proposal name [%s] (does not match regex), returning False" % self.name)
            return False

        # end date < start date
        if (self.end_epoch <= self.start_epoch):
            printdbg(
                "\tProposal end_epoch [%s] <= start_epoch [%s] , returning False" % (self.end_epoch, self.start_epoch))
            return False

        # amount must be numeric
        if misc.is_numeric(self.payment_amount) is False:
            printdbg(
                "\tProposal amount [%s] is not valid, returning False" % self.payment_amount)
            return False

        # amount can't be negative or 0
        if (float(self.payment_amount) <= 0):
            printdbg(
                "\tProposal amount [%s] is negative or zero, returning False" % self.payment_amount)
            return False

        # payment address is valid base58 dash addr, non-multisig
        if not dashlib.is_valid_dash_address(self.payment_address, config.network):
            printdbg(
                "\tPayment address [%s] not a valid Dash address for network [%s], returning False" % (self.payment_address, config.network))
            return False

        # URL
        if (len(self.url.strip()) < 4):
            printdbg("\tProposal URL [%s] too short, returning False" % self.url)
            return False

        # proposal URL has any whitespace
        # BUGFIX: this debug message used to interpolate self.name while
        # claiming to show the URL; print the offending URL instead.
        if (re.search(r'\s', self.url)):
            printdbg(
                "\tProposal URL [%s] has whitespace, returning False" % self.url)
            return False

        # Dash Core restricts proposals to 512 bytes max
        # (serialise() returns hex, i.e. two characters per byte)
        if len(self.serialise()) > (self.MAX_DATA_SIZE * 2):
            printdbg("\tProposal [%s] is too big, returning False" % self.name)
            return False

        try:
            parsed = urlparse.urlparse(self.url)
        except Exception as e:
            printdbg(
                "\tUnable to parse Proposal URL, marking invalid: %s" % e)
            return False

    except Exception as e:
        # BUGFIX: `e.message` does not exist on Python 3 exceptions and is
        # deprecated on Python 2 -- formatting the exception itself works
        # on both and yields the same text for simple exceptions.
        printdbg(
            "Unable to validate in Proposal#is_valid, marking invalid: %s" % e)
        return False

    printdbg("Leaving Proposal#is_valid, Valid = True")
    return True
def import_gobject_from_dashd(self, dashd, rec):
    """Upsert a GovernanceObject row (and its typed sub-object) from one
    `gobject list` record returned by dashd.

    rec: dict from the dashd RPC, read for keys 'Hash', 'CollateralHash',
    'DataHex' and the vote-count fields.

    Returns a (govobj, subobj) tuple; subobj is None when the
    deserialised object fails validation or raises during creation, in
    which case a delete vote is cast via govobj.vote_delete().
    """
    import decimal
    import dashlib
    import binascii
    import gobject_json

    object_hash = rec['Hash']

    gobj_dict = {
        'object_hash': object_hash,
        'object_fee_tx': rec['CollateralHash'],
        'absolute_yes_count': rec['AbsoluteYesCount'],
        'abstain_count': rec['AbstainCount'],
        'yes_count': rec['YesCount'],
        'no_count': rec['NoCount'],
    }

    # deserialise and extract object
    json_str = binascii.unhexlify(rec['DataHex']).decode('utf-8')
    dikt = gobject_json.extract_object(json_str)

    subobj = None

    # map dashd's numeric governance-object type to our model class;
    # raises KeyError for unknown types
    type_class_map = {
        1: Proposal,
        2: Superblock,
    }
    subclass = type_class_map[dikt['type']]

    # set object_type in govobj table
    gobj_dict['object_type'] = subclass.govobj_type

    # exclude any invalid model data from dashd...
    valid_keys = subclass.serialisable_fields()
    subdikt = {k: dikt[k] for k in valid_keys if k in dikt}

    # get/create, then sync vote counts from dashd, with every run
    govobj, created = self.get_or_create(object_hash=object_hash, defaults=gobj_dict)
    if created:
        printdbg("govobj created = %s" % created)
    count = govobj.update(**gobj_dict).where(
        self.id == govobj.id).execute()
    if count:
        printdbg("govobj updated = %d" % count)
    subdikt['governance_object'] = govobj

    # get/create, then sync payment amounts, etc. from dashd - Dashd is the master
    try:
        newdikt = subdikt.copy()
        newdikt['object_hash'] = object_hash
        # invalid objects get an immediate delete vote instead of a DB row
        if subclass(**newdikt).is_valid() is False:
            govobj.vote_delete(dashd)
            return (govobj, None)

        subobj, created = subclass.get_or_create(object_hash=object_hash, defaults=subdikt)
    except Exception as e:
        # in this case, vote as delete, and log the vote in the DB
        printdbg("Got invalid object from dashd! %s" % e)
        govobj.vote_delete(dashd)
        return (govobj, None)

    if created:
        printdbg("subobj created = %s" % created)
    count = subobj.update(**subdikt).where(
        subclass.id == subobj.id).execute()
    if count:
        printdbg("subobj updated = %d" % count)

    # ATM, returns a tuple w/gov attributes and the govobj
    return (govobj, subobj)
def vote(self, dashd, signal, outcome):
    """Cast (or refresh) a vote on this governance object.

    Bails out when the object has no usable hash, or when an identical
    vote for this signal is already on record.  A stale vote with a
    different outcome is removed first so the new one can be cast.  On
    RPC success the vote is persisted locally; on failure we fall back
    to syncing the network's view of our vote.
    """
    import dashlib

    # At this point, will probably never reach here. But doesn't hurt to
    # have an extra check just in case objects get out of sync (people will
    # muck with the DB).
    if self.object_hash == '0' or not misc.is_hash(self.object_hash):
        printdbg("No governance object hash, nothing to vote on.")
        return

    # have I already voted on this gobject with this particular signal and outcome?
    if not self.voted_on(signal=signal):
        printdbg("Haven't voted on this gobject/signal yet...")
    else:
        printdbg("Found a vote for this gobject/signal...")
        existing = self.votes.where(Vote.signal == signal)[0]

        if existing.outcome == outcome:
            # identical vote already recorded -- nothing more to do
            printdbg(
                "Already voted for this same gobject/signal/outcome, no need to re-vote."
            )
            return

        printdbg(
            "Found a STALE vote for this gobject/signal, deleting so that we can re-vote."
        )
        existing.delete_instance()

    # now ... vote!
    cmd = self.get_vote_command(signal, outcome)
    printdbg(' '.join(cmd))
    rpc_output = dashd.rpc_command(*cmd)

    # extract vote output parsing to external lib
    if dashlib.did_we_vote(rpc_output):
        printdbg('VOTE success, saving Vote object to database')
        Vote(governance_object=self, signal=signal, outcome=outcome,
             object_hash=self.object_hash).save()
    else:
        printdbg('VOTE failed, trying to sync with network vote')
        self.sync_network_vote(dashd, signal)
def main():
    """One Sentinel run against anond.

    Flow: connectivity/sync/masternode sanity checks, optional debug
    logging, scheduler gating, gobject sync, watchdog-or-ping
    maintenance, expired-proposal pruning, and scheduling the next run.
    """
    # anond = anondaemon.from_dash_conf(config.dash_conf)
    anond = AnonDaemon.from_anon_conf(config.anon_conf)
    options = process_args()

    # check anond connectivity
    if not is_anond_port_open(anond):
        print(
            "Cannot connect to anond. Please ensure anond is running and the JSONRPC port is open to Sentinel."
        )
        return

    # check anond sync
    if not anond.is_synced():
        print(
            "anond not synced with network! Awaiting full sync before running Sentinel."
        )
        return

    # ensure valid masternode
    if not anond.is_masternode():
        print("Invalid Masternode Status, cannot continue.")
        return

    # register a handler if SENTINEL_DEBUG is set
    if os.environ.get('SENTINEL_DEBUG', None):
        import logging
        logger = logging.getLogger('peewee')
        logger.setLevel(logging.DEBUG)
        logger.addHandler(logging.StreamHandler())

    if options.bypass:
        # bypassing scheduler, remove the scheduled event
        printdbg("--bypass-schedule option used, clearing schedule")
        Scheduler.clear_schedule()

    if not Scheduler.is_run_time():
        printdbg("Not yet time for an object sync/vote, moving on.")
        return

    if not options.bypass:
        # delay to account for cron minute sync
        Scheduler.delay()

    # running now, so remove the scheduled event
    Scheduler.clear_schedule()

    # ========================================================================
    # general flow:
    # ========================================================================
    #
    # load "gobject list" rpc command data, sync objects into internal database
    perform_anond_object_sync(anond)

    if anond.has_sentinel_ping:
        sentinel_ping(anond)
    else:
        # delete old watchdog objects, create a new if necessary
        watchdog_check(anond)

    # auto vote network objects as valid/invalid
    # check_object_validity(anond)

    # vote to delete expired proposals
    # BUGFIX: this call previously appeared twice back-to-back; a single
    # pass is sufficient and avoids redundant RPC/vote churn.
    prune_expired_proposals(anond)

    # create a Superblock if necessary
    # attempt_superblock_creation(anond)

    # schedule the next run
    Scheduler.schedule_next_run()
def watchdog_check(anond):
    """Maintain network watchdog objects.

    Votes to delete every expired watchdog, then either submits a fresh
    watchdog when none are active, or votes the highest-hash active
    watchdog VALID and all the rest DELETE.
    """
    printdbg("in watchdog_check")

    # expired watchdogs are voted off first
    for wd in Watchdog.expired(anond):
        printdbg("\tFound expired watchdog [%s], voting to delete" % wd.object_hash)
        wd.vote(anond, VoteSignals.delete, VoteOutcomes.yes)

    # now, get all the active ones...
    active = Watchdog.active(anond)

    if active.count() == 0:
        # none exist -- create and submit a new one to the network
        printdbg("\tNo watchdogs exist... submitting new one.")
        fresh = Watchdog(created_at=int(time.time()))
        fresh.submit(anond)
    else:
        ordered = sorted(active, key=lambda wd: wd.object_hash)

        # highest hash wins
        winner = ordered[-1]
        printdbg("\tFound winning watchdog [%s], voting VALID" % winner.object_hash)
        winner.vote(anond, VoteSignals.valid, VoteOutcomes.yes)

        # everything below the winner gets a delete vote
        for wd in ordered[:-1]:
            printdbg("\tFound losing watchdog [%s], voting DELETE" % wd.object_hash)
            wd.vote(anond, VoteSignals.delete, VoteOutcomes.yes)

    printdbg("leaving watchdog_check")
def create_superblock(proposals, event_block_height, budget_max, sb_epoch_time):
    """Assemble a Superblock model from ranked proposals.

    Walks `proposals` in rank order, skipping any that would overflow
    `budget_max` or whose [start_epoch, end_epoch] window (padded by the
    fudge window) does not contain `sb_epoch_time`.  Returns None when
    no proposal makes the cut, otherwise a Superblock whose payments are
    sorted by proposal hash, descending.
    """
    from models import Superblock, GovernanceObject, Proposal
    from constants import SUPERBLOCK_FUDGE_WINDOW

    # don't create an empty superblock
    if not proposals:
        printdbg("No proposals, cannot create an empty superblock.")
        return None

    allocated = Decimal(0)
    fudge = SUPERBLOCK_FUDGE_WINDOW  # fudge-factor to allow for slighly incorrect estimates

    fmt_string = "name: %s, rank: %4d, hash: %s, amount: %s <= %s"

    payments = []
    for proposal in proposals:
        # skip proposals that are too expensive...
        if (allocated + proposal.payment_amount) > budget_max:
            printdbg(fmt_string % (
                proposal.name,
                proposal.rank,
                proposal.object_hash,
                proposal.payment_amount,
                "skipped (blows the budget)",
            ))
            continue

        # skip proposals if the SB isn't within the Proposal time window...
        window_start = proposal.start_epoch - fudge
        window_end = proposal.end_epoch + fudge

        printdbg("\twindow_start: %s" % epoch2str(window_start))
        printdbg("\twindow_end: %s" % epoch2str(window_end))
        printdbg("\tsb_epoch_time: %s" % epoch2str(sb_epoch_time))

        if not (window_start <= sb_epoch_time <= window_end):
            printdbg(fmt_string % (
                proposal.name,
                proposal.rank,
                proposal.object_hash,
                proposal.payment_amount,
                "skipped (SB time is outside of Proposal window)",
            ))
            continue

        printdbg(fmt_string % (
            proposal.name,
            proposal.rank,
            proposal.object_hash,
            proposal.payment_amount,
            "adding",
        ))

        # accepted: track the running budget and record the payment
        allocated += proposal.payment_amount
        payments.append({
            'address': proposal.payment_address,
            'amount': "{0:.8f}".format(proposal.payment_amount),
            'proposal': "{}".format(proposal.object_hash),
        })

    # don't create an empty superblock
    if not payments:
        printdbg("No proposals made the cut!")
        return None

    # 'payments' now contains all the proposals for inclusion in the
    # Superblock, but needs to be sorted by proposal hash descending
    payments.sort(key=lambda k: k['proposal'], reverse=True)

    sb = Superblock(
        event_block_height=event_block_height,
        payment_addresses='|'.join([pd['address'] for pd in payments]),
        payment_amounts='|'.join([pd['amount'] for pd in payments]),
        proposal_hashes='|'.join([pd['proposal'] for pd in payments]),
    )
    printdbg("generated superblock: %s" % sb.__dict__)

    return sb
# NOTE(review): this chunk begins mid-way through process_args() -- the
# enclosing `def` and the ArgumentParser construction sit above this view;
# the indentation below is reconstructed on that assumption.
    # "-b/--bypass-scheduler" lets main() skip the run-time window check
    parser.add_argument('-b', '--bypass-scheduler',
                        action='store_true',
                        help='Bypass scheduler and sync/vote immediately',
                        dest='bypass')

    args = parser.parse_args()

    return args


if __name__ == '__main__':
    # run cleanup on normal exit and die cleanly on Ctrl-C
    atexit.register(cleanup)
    signal.signal(signal.SIGINT, signal_handler)

    # ensure another instance of Sentinel is not currently running
    mutex_key = 'SENTINEL_RUNNING'
    # assume that all processes expire after 'timeout_seconds' seconds
    timeout_seconds = 90

    is_running = Transient.get(mutex_key)
    if is_running:
        printdbg("An instance of Sentinel is already running -- aborting.")
        sys.exit(1)
    else:
        # take the mutex; it self-expires so a crash cannot wedge Sentinel
        Transient.set(mutex_key, misc.now(), timeout_seconds)

    # locked to this instance -- perform main logic here
    main()

    # normal completion: release the mutex right away
    Transient.delete(mutex_key)
def is_expired(self, superblockcycle=None):
    """Return True once this Proposal is past its end epoch plus a grace
    window (half a superblock cycle plus the fudge window), so valid
    proposals near the boundary are not excluded prematurely.

    Raises when `superblockcycle` is not supplied.
    """
    from constants import SUPERBLOCK_FUDGE_WINDOW
    import dacashlib

    if not superblockcycle:
        raise Exception("Required field superblockcycle missing.")

    printdbg("In Proposal#is_expired, for Proposal: %s" % self.__dict__)

    current_time = misc.now()
    printdbg("\tnow = %s" % current_time)

    # half the SB cycle, converted to seconds
    # add the fudge_window in seconds, defined elsewhere in Sentinel
    half_cycle_secs = dacashlib.blocks_to_seconds(superblockcycle) / 2
    grace_secs = int(half_cycle_secs + SUPERBLOCK_FUDGE_WINDOW)
    printdbg("\texpiration_window_seconds = %s" % grace_secs)

    # "fully expires" adds the expiration window to end time to ensure a
    # valid proposal isn't excluded from SB by cutting it too close
    deadline = self.end_epoch + grace_secs
    printdbg("\tfully_expires_at = %s" % deadline)

    if deadline < current_time:
        printdbg("\tProposal end_epoch [%s] < now [%s] , returning True" % (self.end_epoch, current_time))
        return True

    printdbg("Leaving Proposal#is_expired, Expired = False")
    return False
def is_valid(self):
    """Validate this Superblock's pipe-delimited payment data.

    Every address must be valid for the configured network, every
    amount numeric and strictly positive, every proposal hash (if any)
    well-formed, and the address/amount counts must match.
    """
    import dacashlib
    import decimal

    printdbg("In Superblock#is_valid, for SB: %s" % self.__dict__)

    # it's a string from the DB...
    addr_list = self.payment_addresses.split('|')
    for address in addr_list:
        if not dacashlib.is_valid_dacash_address(address, config.network):
            printdbg("\tInvalid address [%s], returning False" % address)
            return False

    amount_list = self.payment_amounts.split('|')
    for amount in amount_list:
        if not misc.is_numeric(amount):
            printdbg("\tAmount [%s] is not numeric, returning False" % amount)
            return False

        # no negative or zero amounts allowed
        value = decimal.Decimal(amount)
        if not value > 0:
            printdbg("\tAmount [%s] is zero or negative, returning False" % value)
            return False

    # verify proposal hashes correctly formatted...
    if len(self.proposal_hashes) > 0:
        for object_hash in self.proposal_hashes.split('|'):
            if not misc.is_hash(object_hash):
                printdbg("\tInvalid proposal hash [%s], returning False" % object_hash)
                return False

    # ensure number of payment addresses matches number of payments
    if len(addr_list) != len(amount_list):
        printdbg("\tNumber of payment addresses [%s] != number of payment amounts [%s], returning False" % (len(addr_list), len(amount_list)))
        return False

    printdbg("Leaving Superblock#is_valid, Valid = True")
    return True
def attempt_superblock_creation(sarosd):
    """Create, vote on, and possibly submit a Superblock for the next
    superblock event block height.

    Returns early when: we are not a masternode, we already cast a
    FUNDING vote at this height (after voting down competing SBs), or
    we are not yet in the governance-object maturity phase.
    """
    import saroslib

    if not sarosd.is_masternode():
        print("We are not a Masternode... can't submit superblocks!")
        return

    # query votes for this specific ebh... if we have voted for this specific
    # ebh, then it's voted on. since we track votes this is all done using joins
    # against the votes table
    #
    # has this masternode voted on *any* superblocks at the given event_block_height?
    # have we voted FUNDING=YES for a superblock for this specific event_block_height?

    event_block_height = sarosd.next_superblock_height()

    if Superblock.is_voted_funding(event_block_height):
        # printdbg("ALREADY VOTED! 'til next time!")

        # vote down any new SBs because we've already chosen a winner
        for sb in Superblock.at_height(event_block_height):
            if not sb.voted_on(signal=VoteSignals.funding):
                sb.vote(sarosd, VoteSignals.funding, VoteOutcomes.no)

        # now return, we're done
        return

    if not sarosd.is_govobj_maturity_phase():
        printdbg("Not in maturity phase yet -- will not attempt Superblock")
        return

    proposals = Proposal.approved_and_ranked(
        proposal_quorum=sarosd.governance_quorum(),
        next_superblock_max_budget=sarosd.next_superblock_max_budget())
    budget_max = sarosd.get_superblock_budget_allocation(event_block_height)
    sb_epoch_time = sarosd.block_height_to_epoch(event_block_height)

    sb = saroslib.create_superblock(proposals, event_block_height, budget_max, sb_epoch_time)
    if not sb:
        printdbg("No superblock created, sorry. Returning.")
        return

    # find the deterministic SB w/highest object_hash in the DB
    dbrec = Superblock.find_highest_deterministic(sb.hex_hash())
    if dbrec:
        dbrec.vote(sarosd, VoteSignals.funding, VoteOutcomes.yes)

        # any other blocks which match the sb_hash are duplicates, delete them
        # NOTE(review): the loop variable shadows the locally created `sb`;
        # the where() filter is evaluated before the first rebinding, so it
        # still matches on the locally generated superblock's hash.
        for sb in Superblock.select().where(
                Superblock.sb_hash == sb.hex_hash()):
            if not sb.voted_on(signal=VoteSignals.funding):
                sb.vote(sarosd, VoteSignals.delete, VoteOutcomes.yes)

        printdbg(
            "VOTED FUNDING FOR SB! We're done here 'til next superblock cycle."
        )
        return
    else:
        printdbg("The correct superblock wasn't found on the network...")

    # if we are the elected masternode...
    if (sarosd.we_are_the_winner()):
        printdbg("we are the winner! Submit SB to network")
        sb.submit(sarosd)
def sentinel_ping(sarosd):
    """Send a sentinel keepalive ping through the given daemon client."""
    printdbg("in sentinel_ping")

    sarosd.ping()

    printdbg("leaving sentinel_ping")
def import_gobject_from_curved(self, curved, rec):
    """Upsert a GovernanceObject row (and its typed sub-object) from one
    `gobject list` record returned by curved.

    rec: dict from the curved RPC, read for keys 'Hash',
    'CollateralHash', 'DataHex' and the vote-count fields.

    Returns a (govobj, subobj) tuple; subobj is None when the
    deserialised object fails validation or raises during creation, in
    which case a delete vote is cast via govobj.vote_delete().
    """
    import decimal
    import curvelib
    import inflection

    object_hex = rec['DataHex']
    object_hash = rec['Hash']

    gobj_dict = {
        'object_hash': object_hash,
        'object_fee_tx': rec['CollateralHash'],
        'absolute_yes_count': rec['AbsoluteYesCount'],
        'abstain_count': rec['AbstainCount'],
        'yes_count': rec['YesCount'],
        'no_count': rec['NoCount'],
    }

    # shim/curved conversion
    object_hex = curvelib.SHIM_deserialise_from_curved(object_hex)
    objects = curvelib.deserialise(object_hex)
    subobj = None

    # first two deserialised elements are (type name, attribute dict);
    # note [0:2:1] is just an explicit-step spelling of [0:2]
    obj_type, dikt = objects[0:2:1]
    # pluralised type name is the reverse-relation key on this model
    obj_type = inflection.pluralize(obj_type)
    subclass = self._meta.reverse_rel[obj_type].model_class

    # set object_type in govobj table
    gobj_dict['object_type'] = subclass.govobj_type

    # exclude any invalid model data from curved...
    valid_keys = subclass.serialisable_fields()
    subdikt = {k: dikt[k] for k in valid_keys if k in dikt}

    # get/create, then sync vote counts from curved, with every run
    govobj, created = self.get_or_create(object_hash=object_hash, defaults=gobj_dict)
    if created:
        printdbg("govobj created = %s" % created)
    count = govobj.update(**gobj_dict).where(self.id == govobj.id).execute()
    if count:
        printdbg("govobj updated = %d" % count)
    subdikt['governance_object'] = govobj

    # get/create, then sync payment amounts, etc. from curved - Curved is the master
    try:
        newdikt = subdikt.copy()
        newdikt['object_hash'] = object_hash
        # invalid objects get an immediate delete vote instead of a DB row
        if subclass(**newdikt).is_valid() is False:
            govobj.vote_delete(curved)
            return (govobj, None)

        subobj, created = subclass.get_or_create(object_hash=object_hash, defaults=subdikt)
    except Exception as e:
        # in this case, vote as delete, and log the vote in the DB
        printdbg("Got invalid object from curved! %s" % e)
        govobj.vote_delete(curved)
        return (govobj, None)

    if created:
        printdbg("subobj created = %s" % created)
    count = subobj.update(**subdikt).where(subclass.id == subobj.id).execute()
    if count:
        printdbg("subobj updated = %d" % count)

    # ATM, returns a tuple w/gov attributes and the govobj
    return (govobj, subobj)
def main():
    """One Sentinel run against dashd.

    Flow: version flag, connectivity/sync/masternode sanity checks,
    optional debug logging, scheduler gating, gobject sync,
    expired-proposal pruning, superblock creation, and scheduling of
    the next run.
    """
    dashd = DashDaemon.from_dash_conf(config.dash_conf)
    opts = process_args()

    # "--version": report and bail out immediately
    if opts.version:
        print("Dash Sentinel v%s" % config.sentinel_version)
        return

    # bail out unless dashd is reachable...
    if not is_dashd_port_open(dashd):
        print(
            "Cannot connect to dashd. Please ensure dashd is running and the JSONRPC port is open to Sentinel."
        )
        return

    # ...fully synced...
    if not dashd.is_synced():
        print(
            "dashd not synced with network! Awaiting full sync before running Sentinel."
        )
        return

    # ...and running as a masternode
    if not dashd.is_masternode():
        print("Invalid Masternode Status, cannot continue.")
        return

    # wire peewee SQL logging to stderr when SENTINEL_DEBUG is set
    if os.environ.get('SENTINEL_DEBUG', None):
        import logging
        peewee_logger = logging.getLogger('peewee')
        peewee_logger.setLevel(logging.DEBUG)
        peewee_logger.addHandler(logging.StreamHandler())

    if opts.bypass:
        # bypassing scheduler, remove the scheduled event
        printdbg("--bypass-schedule option used, clearing schedule")
        Scheduler.clear_schedule()

    if not Scheduler.is_run_time():
        printdbg("Not yet time for an object sync/vote, moving on.")
        return

    if not opts.bypass:
        # delay to account for cron minute sync
        Scheduler.delay()

    # running now, so remove the scheduled event
    Scheduler.clear_schedule()

    # load "gobject list" rpc command data, sync objects into internal database
    perform_dashd_object_sync(dashd)

    # auto vote network objects as valid/invalid
    # check_object_validity(dashd)

    # vote to delete expired proposals
    prune_expired_proposals(dashd)

    # create a Superblock if necessary
    attempt_superblock_creation(dashd)

    # schedule the next run
    Scheduler.schedule_next_run()
def sentinel_ping(papeld):
    """Send a sentinel keepalive ping through the given daemon client."""
    printdbg("in sentinel_ping")

    papeld.ping()

    printdbg("leaving sentinel_ping")