def assemble_orders(rein, job_ids):
    """
    Take a list of job_ids and build their entire orders. The idea here is
    that one Job ID should allow us to query each available server for each
    document type that is associated with it, then filter out cruft by
    focusing on who has signed correctly.

    TODO: look for attempted changes in foundational info like participants'
    public keys and redeem scripts.
    """
    urls = Bucket.get_urls(rein)
    documents = []
    arg_job_ids = ','.join(job_ids)
    for url in urls:
        # query each remote server for all docs associated with these job_ids
        res = Document.get_documents_by_job_id(rein, url, arg_job_ids)
        if res:
            documents += res

    # make sure a local Order row exists for every job_id
    order_ids = {}
    order_id = None
    for job_id in job_ids:
        order_id = Order.get_order_id(rein, job_id)
        if not order_id:
            o = Order(job_id, testnet=rein.testnet)
            rein.session.add(o)
            rein.session.commit()
            order_id = Order.get_order_id(rein, job_id)
        order_ids[job_id] = order_id

    if not order_id:
        return 0

    for document in documents:
        doc_type = Document.get_document_type(document)
        if not doc_type:
            rein.log.info('doc_type not detected')
            continue
        doc_hash = Document.calc_hash(document)
        job_id = Document.get_job_id(document)
        d = rein.session.query(Document).filter(Document.doc_hash == doc_hash).first()
        if d:
            # already stored locally; just attach it to its order
            d.set_order_id(order_ids[job_id])
            rein.session.add(d)
        else:
            # file the new document under the order matching its own job_id,
            # not whichever order_id the loop above happened to end on
            new_document = Document(rein, doc_type, document, order_ids[job_id],
                                    'remote', source_key=None, sig_verified=True,
                                    testnet=rein.testnet)
            rein.session.add(new_document)
        rein.session.commit()
    return len(documents)
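
# Usage sketch for assemble_orders (illustrative only, not part of this module).
# Assumption: `get_user_job_ids` is a hypothetical helper that collects this
# user's Job IDs from locally stored documents; only `rein` (with the .log and
# .session attributes used above) and assemble_orders come from this file.
def sync_all_orders(rein):
    """Rebuild every local order from the remote servers in one pass."""
    job_ids = get_user_job_ids(rein)  # hypothetical helper, defined elsewhere
    if not job_ids:
        rein.log.info('no local job ids to sync')
        return 0
    count = assemble_orders(rein, job_ids)
    rein.log.info('synced %s documents across %s jobs' % (count, len(job_ids)))
    return count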
def assemble_order(rein, document):
    """
    Take one document and build the entire order based on it. The idea here
    is that one Job ID should allow us to query each available server for
    each document type that is associated with it, then filter out cruft by
    focusing on who has signed correctly. This kind of command can also look
    for attempted changes in foundational info like participants' public keys
    and redeem scripts. If this works well, we can reduce how much data is
    required at each stage. Finally, we should be able to serialize a job
    from end to end so it can be easily reviewed by a mediator.
    """
    parsed = parse_document(document.contents)
    if 'Job ID' not in parsed:
        return 0
    job_id = parsed['Job ID']
    urls = Bucket.get_urls(rein)
    documents = []
    if job_id:
        for url in urls:
            # query each remote server for all docs associated with this job_id
            res = Document.get_documents_by_job_id(rein, url, job_id)
            if res:
                documents += res

    order_id = Order.get_order_id(rein, job_id)
    if not order_id:
        o = Order(job_id, testnet=rein.testnet)
        rein.session.add(o)
        rein.session.commit()
        # re-fetch so the documents below get a real order_id, not None
        order_id = Order.get_order_id(rein, job_id)

    for document in documents:
        doc_type = Document.get_document_type(document)
        if not doc_type:
            rein.log.info('doc_type not detected')
            continue
        doc_hash = Document.calc_hash(document)
        d = rein.session.query(Document).filter(Document.doc_hash == doc_hash).first()
        if d:
            # already stored locally; just attach it to this order
            d.set_order_id(order_id)
            rein.session.add(d)
        else:
            new_document = Document(rein, doc_type, document, order_id, 'remote',
                                    source_key=None, sig_verified=True,
                                    testnet=rein.testnet)
            rein.session.add(new_document)
        rein.session.commit()
    return len(documents)
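
# Usage sketch for assemble_order (illustrative only). Assumption: documents
# are persisted via SQLAlchemy as in the queries above, so a primary-key
# lookup with Query.get() works; `doc_id` is a hypothetical parameter.
def rebuild_order_for_document(rein, doc_id):
    """Re-fetch and rebuild the full order behind one stored document."""
    document = rein.session.query(Document).get(doc_id)
    if document is None:
        rein.log.info('document %s not found' % doc_id)
        return 0
    return assemble_order(rein, document)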