def doDistribution(destinationNode, server, sourceUrl, destinationUrl, lock):
    # We always want to use the replication filter function so that only
    # distributable docs are replicated and any other type of document is
    # filtered out. However, we don't have any query arguments until we test
    # whether there is a filter.
    replicationOptions = {'filter': ResourceDataModel.REPLICATION_FILTER,
                          'query_params': None}
    # If the destination node is using a filter and it is not custom, use it
    # as the query params for the filter function.
    if ((destinationNode.filterDescription is not None) and
            (destinationNode.filterDescription.custom_filter == False)):
        replicationOptions['query_params'] = destinationNode.filterDescription.specData

    # if destinationNode['distribute service'].service_auth["service_authz"] is not None:
    #     log.info("Destination node '{}' requires authentication".format(destinationUrl))

    # Try to get the user name and password for the url.
    credential = sourceLRNode.getDistributeCredentialFor(destinationUrl)
    if credential is not None:
        parsedUrl = urlparse.urlparse(destinationUrl)
        destinationUrl = destinationUrl.replace(parsedUrl.netloc, "{0}:{1}@{2}".format(
            credential['username'], credential['password'], parsedUrl.netloc))

    log.info("\n\nReplication started\nSource:{0}\nDestination:{1}\nArgs:{2}".format(
        sourceUrl, destinationUrl, str(replicationOptions)))

    if replicationOptions['query_params'] is None:
        del replicationOptions['query_params']

    results = server.replicate(sourceUrl, destinationUrl, **replicationOptions)
    log.debug("Replication results: " + str(results))

    with lock:
        server = couchdb.Server(appConfig['couchdb.url'])
        db = server[appConfig['couchdb.db.node']]
        doc = db[appConfig['lr.nodestatus.docid']]
        doc['last_out_sync'] = h.nowToISO8601Zformat()
        doc['out_sync_node'] = destinationNode.nodeDescription.node_name
        db[appConfig['lr.nodestatus.docid']] = doc

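# The call above relies on python-couchdb's Server.replicate(). Below is a minimal,
# self-contained sketch of the same filtered-replication pattern; couch_url, the
# database names, and filter_name are placeholders, not the node's real
# configuration (the real filter is ResourceDataModel.REPLICATION_FILTER).
import couchdb

def replicateDistributable(couch_url, source_db, target_db, filter_name, query_params=None):
    server = couchdb.Server(couch_url)
    options = {'filter': filter_name}
    # Only pass query_params when the destination node defines a non-custom filter.
    if query_params is not None:
        options['query_params'] = query_params
    # One-shot replication; CouchDB evaluates the named filter on the source database.
    return server.replicate(source_db, target_db, **options)
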
def _setNodeStatus(self):
    nodeStatusId = "node status"
    nodeStatus = NodeStatusModel.get(nodeStatusId)
    if nodeStatus is None:
        nodeStatus = NodeStatusModel()
        nodeStatus.active = self.nodeDescription.active
        nodeStatus.node_id = self._nodeDescription.node_id
        nodeStatus.node_name = self.nodeDescription.node_name
        nodeStatus.start_time = h.nowToISO8601Zformat()
        nodeStatus.install_time = nodeStatus.start_time
        nodeStatus.save(doc_id=nodeStatusId)
    else:
        nodeStatus = NodeStatusModel(nodeStatus)
        nodeStatus.start_time = h.nowToISO8601Zformat()
        nodeStatus.update()
    self._nodeStatus = nodeStatus

def set_timestamps(self, model, timestamp=None):
    if timestamp is None:
        timestamp = helpers.nowToISO8601Zformat()
    for stamp in ResourceDataHelper.TIME_STAMPS:
        # node_timestamp is always refreshed; the other timestamps are only set
        # when they are missing.
        if stamp not in model or stamp == 'node_timestamp':
            model[stamp] = timestamp
    return model

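# All of these snippets delegate timestamp generation to the helpers module. For
# reference, a minimal sketch of an ISO 8601 "Z"-suffixed UTC formatter; this is
# an illustrative assumption, not the actual lr.lib.helpers implementation.
from datetime import datetime

def nowToISO8601Zformat():
    # Current UTC time formatted like "2011-03-10T15:30:00.000000Z".
    return datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%fZ")
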
def _getNodeJsonDescription(self):
    description = {'timestamp': h.nowToISO8601Zformat()}
    description.update(self._communityDescription.descriptionDict)
    description.update(self._networkDescription.descriptionDict)
    description.update(self._nodeDescription.descriptionDict)
    description.update(self._networkPolicyDescription.descriptionDict)
    if hasattr(self, '_filterDescription'):
        description['filter'] = self._filterDescription.descriptionDict
    return json.dumps(description, indent=2)

def _getStatusDescription(self):
    count = 0
    view = ResourceDataModel._defaultDB.view(appConfig['couchdb.db.resourcecount'],
                                             stale=appConfig['couchdb.stale.flag'])
    if len(view.rows) > 0:
        count = view.rows[0].value
    statusData = {'doc_count': count,
                  'timestamp': h.nowToISO8601Zformat()}
    statusData.update(self._nodeStatus.specData)
    return statusData

def __init__(self, data=None):
    super(ResourceDataModel, self).__init__(data)
    # Set the timestamps by default on creation, using UTC.
    timeStamp = h.nowToISO8601Zformat()
    # Set the timestamps on creation if they are not already set.
    for stamp in self._TIME_STAMPS:
        if stamp not in self._specData.keys():
            self.__setattr__(stamp, timeStamp)
    # Set the doc_ID if none is provided.
    if self._DOC_ID not in self._specData.keys():
        doc_id = uuid4().hex
        self.__setattr__(self._DOC_ID, doc_id)

def publishResourceData(self, docs):
    resourceDatabase = self._server[self._nodeConfig.get("couch_info", "resourcedata")]
    for d in docs:
        doc = {}
        doc.update(d)
        # Delete any previous revision number from the doc.
        if "_rev" in doc:
            del doc["_rev"]
        doc["doc_ID"] = uuid.uuid4().hex
        now = h.nowToISO8601Zformat()
        doc["node_timestamp"] = now
        doc["create_timestamp"] = now
        doc["update_timestamp"] = now
        resourceDatabase[doc["doc_ID"]] = doc
    self.waitOnChangeMonitor()

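# Usage sketch (hypothetical caller and payload): each envelope passed in gets a
# fresh doc_ID plus identical node/create/update timestamps before it is written
# to the resourcedata database, e.g.
#   publisher.publishResourceData([{"doc_type": "resource_data",
#                                   "resource_data": "<payload>"}])
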
def load_environment(global_conf, app_conf):
    """Configure the Pylons environment via the ``pylons.config`` object."""
    config = PylonsConfig()

    # Pylons paths
    root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    paths = dict(root=root,
                 controllers=os.path.join(root, 'controllers'),
                 static_files=os.path.join(root, 'public'),
                 templates=[os.path.join(root, 'templates')])

    try:
        logging.config.fileConfig(global_conf['__file__'])
    except:
        pass  # to make unit tests run

    # Initialize config with the basic options
    config.init_app(global_conf, app_conf, package='lr', paths=paths)

    config['routes.map'] = make_map(config)
    config['pylons.app_globals'] = app_globals.Globals(config)
    config['pylons.h'] = lr.lib.helpers
    config['pylons.response_options']['content-type'] = 'application/json'

    # Setup cache object as early as possible
    import pylons
    pylons.cache._push_object(config['pylons.app_globals'].cache)

    # Create the Mako TemplateLookup, with the default auto-escaping
    config['pylons.app_globals'].mako_lookup = TemplateLookup(
        directories=paths['templates'],
        error_handler=handle_mako_error,
        module_directory=os.path.join(app_conf['cache_dir'], 'templates'),
        input_encoding='utf-8',
        default_filters=['escape'],
        imports=['from webhelpers.html import escape'])

    # CONFIGURATION OPTIONS HERE (note: all config options will override
    # any Pylons config options)
    import couchdb
    import lr.lib.helpers as helpers
    server = couchdb.Server(config['couchdb.url.dbadmin'])
    db = server[config['couchdb.db.node']]
    doc = db[config['lr.nodestatus.docid']]
    doc['start_time'] = helpers.nowToISO8601Zformat()
    db.save(doc)

    return config

def _getNodeJsonPolicy(self):
    policy = {'timestamp': h.nowToISO8601Zformat()}
    # Copy the relevant network and node fields when they are present.
    for key in ("network_description", "network_name"):
        if key in self._networkDescription.descriptionDict:
            policy[key] = self._networkDescription.descriptionDict[key]
    for key in ("node_id", "node_name"):
        if key in self._nodeDescription.descriptionDict:
            policy[key] = self._nodeDescription.descriptionDict[key]
    # policy.update(self._networkPolicyDescription.descriptionDict)
    policy.update(self._networkPolicyDescription.specData)
    return json.dumps(policy, indent=2)

def handleDocument(newDoc):
    should_delete = True
    try:
        newDoc['node_timestamp'] = h.nowToISO8601Zformat()
        rd = ResourceDataModel(newDoc)
        rd.save(log_exceptions=False)
    except SpecValidationException as e:
        log.error(newDoc['_id'] + str(e))
    except ResourceConflict:
        log.error('conflict')
    except Exception as ex:
        should_delete = False  # don't delete; something unexpected happened
        log.error(ex)
    if should_delete:
        try:
            del database[newDoc['_id']]
        except Exception as ex:
            log.error(ex)

def makeTombstone(replacement_doc, replacement_info, orig_doc_id, orig_doc=None):
    tombstone = copy.deepcopy(_TOMBSTONE_TEMPLATE)
    tombstone.update({
        "doc_ID": orig_doc_id,
        "create_timestamp": nowToISO8601Zformat()
    })
    if orig_doc is not None:
        tombstone["resource_locator"] = orig_doc["resource_locator"]
        if "replaces" in orig_doc:
            tombstone["replaces"] = orig_doc["replaces"]
        else:
            del tombstone["replaces"]
    tombstone["replaced_by"].update({
        "doc_ID": replacement_doc["doc_ID"],
        "public_key_fingerprint": replacement_info.pubkey_fingerprint,
        "public_key_locations": replacement_doc["digital_signature"]["key_location"]
    })
    return tombstone

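# makeTombstone copies _TOMBSTONE_TEMPLATE before filling it in. A hypothetical
# template shape, limited to the fields the function above actually reads or
# writes (the real template may define additional spec fields):
_TOMBSTONE_TEMPLATE_SKETCH = {
    "doc_ID": None,
    "create_timestamp": None,
    "resource_locator": None,
    "replaces": [],
    "replaced_by": {
        "doc_ID": None,
        "public_key_fingerprint": None,
        "public_key_locations": [],
    },
}
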
def _handle(self, change, database):
    should_delete = True
    try:
        log.debug('got here')
        newDoc = change[_DOC]
        newDoc['node_timestamp'] = h.nowToISO8601Zformat()
        rd = ResourceDataModel(newDoc)
        rd.save(log_exceptions=False)
    except SpecValidationException:
        log.error(str(newDoc) + " Fails Validation")
    except ResourceConflict:
        pass  # ignore conflicts
    except Exception as ex:
        should_delete = False  # don't delete; something unexpected happened
        log.error(ex)
    if should_delete:
        try:
            del database[change[_ID]]
        except Exception as ex:
            log.error(ex)

def _getStatusDescription(self):
    statusData = {'doc_count': ResourceDataModel._defaultDB.view(
                      appConfig['couchdb.db.resourcesview'], stale='ok').total_rows,
                  'timestamp': h.nowToISO8601Zformat()}
    statusData.update(self._nodeStatus.specData)
    return statusData

def recordChanges():
    if self._monitoringChanges:
        return
    self._monitoringChanges = True
    db = ResourceDataModel._defaultDB
    while True:
        # I have to include the doc since the filter does not seem to work. Otherwise,
        # using the same replication filter to get only resource_data documents
        # would have been better.
        options = {'feed': 'continuous',
                   'since': self._lastChangeSeq,
                   'include_docs': True}
        changes = db.changes(**options)
        for change in changes:
            if 'doc' not in change:
                continue
            timestamp = h.nowToISO8601Zformat()
            # See if the document is of a resource_data type; if not, ignore it.
            doc = change['doc']
            if ((not 'resource_data' in doc) and
                    (not 'resource_data_distributable' in doc)):
                continue
            log.info("Change to handle ....")
            # Handle resource_data.
            if doc['doc_type'] == 'resource_data':
                log.info("\n*******Changes to resource_document: " + doc['_id'])
                distributableID = doc['_id'] + "-distributable"
                # Use the ResourceDataModel class to create an object that
                # contains only the resource_data spec data.
                distributableDoc = ResourceDataModel(doc)._specData
                # Remove the node timestamp.
                del distributableDoc['node_timestamp']
                # Change the doc_type.
                distributableDoc['doc_type'] = 'resource_data_distributable'
                log.debug("\n\ndistributable doc:\n{0}\n".format(pprint.pformat(distributableDoc)))
                # Check whether a corresponding distributable document exists. If
                # not, create a new distributable document without the
                # node_timestamp, using _id + "-distributable" as its id.
                if distributableID not in db:
                    try:
                        log.info('Adding distributable doc...\n')
                        db[distributableID] = distributableDoc
                    except Exception as e:
                        log.error("Cannot save distributable document copy\n")
                        log.exception(e)
                else:
                    # A distributable copy of the document is in the database. See
                    # if it needs to be updated.
                    try:
                        savedDistributableDoc = db[distributableID]
                    except Exception as e:
                        log.error("Cannot get existing distributable document\n")
                        log.exception(e)
                        continue
                    temp = {}
                    temp.update(savedDistributableDoc)
                    # Remove the couchdb-generated fields so that we can compare
                    # the two documents and see if an update is needed.
                    for k in _COUCHDB_FIELDS:
                        if k in temp:
                            del temp[k]
                    if distributableDoc != temp:
                        savedDistributableDoc.update(distributableDoc)
                        log.info("\n\nUpdate distributable doc:\n")
                        log.debug("\n{0}\n\n".format(pprint.pformat(distributableDoc)))
                        try:
                            db.update([savedDistributableDoc])
                        except Exception as e:
                            log.error("Failed to update existing distributable doc: {0}".format(
                                pprint.pformat(savedDistributableDoc)))
                            log.exception(e)
            elif doc['doc_type'] == 'resource_data_distributable':
                log.info("\n-------Changes to distributable resource doc: " + doc['_id'])
                # Check if the document is already in the database.
                resourceDataID = doc['doc_ID']
                # Create a resource_data object from the distributable data.
                # The specData will generate a node_timestamp by default.
                resourceDataDoc = ResourceDataModel(doc)._specData
                resourceDataDoc['doc_type'] = 'resource_data'
                if resourceDataID not in db:
                    try:
                        log.info("Adding new resource_data for distributable")
                        db[resourceDataID] = resourceDataDoc
                    except Exception as e:
                        log.error("\n\nCannot get current document: {0}".format(
                            pprint.pformat(resourceDataDoc)))
                        log.exception(e)
                else:
                    # A resource_data document already exists for the distributable;
                    # get it and see if it needs to be updated.
                    try:
                        savedResourceDoc = db[resourceDataID]
                    except Exception as e:
                        log.error("\n\nCannot find existing resource_data doc for distributable: \n{0}".format(
                            pprint.pformat(doc)))
                        log.exception(e)
                        continue
                    # Remove the couchdb-generated fields.
                    temp = {}
                    temp.update(savedResourceDoc)
                    for k in _COUCHDB_FIELDS:
                        if k in temp:
                            del temp[k]
                    # Now delete the node_timestamp field on both documents
                    # before comparing them.
                    del temp['node_timestamp']
                    del resourceDataDoc['node_timestamp']
                    if temp != resourceDataDoc:
                        savedResourceDoc.update(resourceDataDoc)
                        try:
                            log.info("\nUpdate existing resource data from distributable\n")
                            db.update([savedResourceDoc])
                        except Exception as e:
                            log.error("\n\nFailed to update existing resource_data doc:\n{0}".format(
                                pprint.pformat(savedResourceDoc)))
                            log.exception(e)
            # Keep the last change sequence.
            self._lastChangeSeq = change['seq']

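# The change monitor above drives everything off CouchDB's continuous _changes
# feed. A stripped-down sketch of that consumption pattern using python-couchdb
# (couch_url and db_name are placeholders; the real code tracks the sequence in
# self._lastChangeSeq and dispatches on doc_type as shown above):
import couchdb

def followChanges(couch_url, db_name, since=0):
    db = couchdb.Server(couch_url)[db_name]
    # feed='continuous' yields one parsed change row at a time; include_docs
    # attaches the full document to each row.
    for change in db.changes(feed='continuous', since=since, include_docs=True):
        doc = change.get('doc')
        if doc is None:
            continue
        yield change['seq'], doc
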
def _getStatusDescription(self):
    statusData = {'doc_count': ResourceDataModel._defaultDB.info()['doc_count'],
                  'timestamp': h.nowToISO8601Zformat()}
    statusData.update(self._nodeStatus.specData)
    return statusData