def start_dbs_monitor(changes_callback):
    """Spawn a daemon thread that monitors changes across all databases.

    :param changes_callback: callable invoked for each incoming change
    :returns: the started DBsMonitorThread instance
    """
    logger.debug(u'Starting global workspaces monitor')
    stream = ChangesStream(ChangesStream.ALL_DBS,
                           feed='continuous',
                           heartbeat='true')
    monitor = DBsMonitorThread(stream, changes_callback)
    # Daemonize so the monitor never blocks interpreter shutdown.
    monitor.daemon = True
    monitor.start()
    return monitor
def edit_doctor(doctorid, post_data):
    """Edit a doctor in the system. The post data is a dict.

    :param doctorid: the doctor's identifier
    :param post_data: dict of fields to update
    :returns: a status, a str (doctor's info on success, err info on failure)
    """
    try:
        logger.debug('in edit_doctor')
        DoctorModel.update_by_dict(doctorid, post_data)
        logger.debug('executed')
    except Exception as ex:
        # Use a %s placeholder so the exception actually reaches the log;
        # the original passed `ex` as a lazy format arg with no placeholder.
        logger.error('Exception: %s', ex)
        return 0, 'edit_doctor failed, did not edit doctor'
    else:
        return 1, str(doctorid)
def broadcast(self, msg):
    """Send a prepared message to every client of the message's workspace.

    The message is prepared exactly once and reused for every recipient;
    the original re-ran prepareMessage inside the loop for each client.
    """
    logger.debug("broadcasting prepared message '{}' ..".format(msg))
    prepared = self.prepareMessage(msg)
    payload = json.loads(prepared.payload)
    for client in self.workspace_clients[payload['workspace']]:
        reactor.callFromThread(client.sendPreparedMessage, prepared)
        logger.debug("prepared message sent to {}".format(client.peer))
def __iter__(self):
    """Yield parsed change documents from the continuous CouchDB feed.

    On any request/stream error the connection is closed.  NOTE(review):
    __stop is set to True here, so despite the "Retrying" log messages the
    while loop exits after the sleep -- looks unintended, confirm.
    """
    while not self.__stop:
        try:
            # TODO: Connection timeout is too long.
            self.__response = requests.get(self.__url,
                                           params=self.__params,
                                           stream=True,
                                           auth=get_auth_info())
            for raw_line in self.__response.iter_lines():
                line = self.__sanitize(raw_line)
                if not line:
                    continue
                change = self.__parse_change(line)
                if not change:
                    continue
                yield change
        # `except Exception:` replaces the Python-2-only `except Exception, e`.
        except Exception:
            import traceback
            logger.debug(traceback.format_exc())
            # Close everything but keep retrying
            self.stop()
            self.__stop = True
            logger.warning(
                u"Lost connection to CouchDB. Retrying in 5 seconds...")
            time.sleep(5)
            logger.info(u"Retrying...")
def run(self):
    """Consume the change stream, validating and dispatching each change.

    Failing changes are logged and skipped.  An 'unauthorized' error aborts
    the whole process; a 'no_db_file' reason stops the stream and exits.
    """
    for change_doc in self.__stream:
        try:
            if self.CHANGE_CLS.validate(change_doc):
                self.__changes_callback(self.CHANGE_CLS(change_doc))
            else:
                logger.debug(u'Ignoring change: {}'.format(change_doc))
        # `except Exception:` replaces the Python-2-only `except Exception, e`.
        except Exception:
            import traceback
            logger.debug(traceback.format_exc())
            logger.warning(
                u"Error while processing change. Ignoring. Offending change: {}"
                .format(change_doc))
        if change_doc.get('error', None):
            if change_doc.get('error') == 'unauthorized':
                logger.error(u"Unauthorized access to CouchDB. Make sure faraday-server's"
                             " configuration file has CouchDB admin's credentials set")
                thread.interrupt_main()  # TODO: A proper fix is needed here
            elif change_doc.get('reason') == 'no_db_file':
                self.__stream.stop()
                break
def post(self):
    """Handle a sequence-similarity query.

    Expects a JSON body with a required 'seq' key and an optional 'dist'
    (maximum index distance, default 100).  Looks the sequence up in the
    SequenceIndex, fetches the matching documents from the repository and
    writes them back ordered by the index's result order.

    :raises HTTPError: 400 when 'seq' is missing from the body
    """
    body = json.loads(self.request.body.decode('utf-8'))
    if 'seq' not in body:
        raise HTTPError(400, 'You must have a sequence to query against')
    idx = SequenceIndex()
    found = idx.find({'sequence': body['seq']}, body.get('dist', 100))
    logger.debug('Found {} hits from index'.format(len(found)))
    # Preserves the index's ordering; maps item name -> distance.
    # NOTE(review): a duplicate name keeps only the last distance -- confirm.
    items_dict = OrderedDict()
    for distance, item in found:
        items_dict[item['name']] = distance
    seqs = yield self.sequence_repository.find(
        {'sequence_id': {
            '$in': list(items_dict.keys())
        }})
    # Attach each hit's index distance to its repository document.
    seq_dict = {
        seq['sequence_id']: {
            **seq, 'distance': items_dict[seq['sequence_id']]
        }
        for seq in seqs
    }
    # Emit documents in the index's order, not the repository's.
    self.write_json([seq_dict[key] for key in items_dict.keys()])
def start_changes_monitor(self, changes_callback, last_seq=0):
    """Launch a daemon thread that follows this workspace's change feed.

    :param changes_callback: callable invoked for each change document
    :param last_seq: CouchDB sequence number to resume from (default 0)
    """
    logger.debug(u'Starting changes monitor for workspace {} since {}'.format(
        self.__ws_name, last_seq))
    stream = ChangesStream(self.__ws_name,
                           feed='continuous',
                           since=last_seq,
                           include_docs='true',
                           heartbeat='true')
    monitor = ChangesMonitorThread(stream, changes_callback)
    # Daemonize so the monitor never blocks interpreter shutdown.
    monitor.daemon = True
    self.__changes_monitor_thread = monitor
    self.__changes_monitor_thread.start()
def __iter__(self):
    """Yield parsed change documents from the continuous CouchDB feed.

    On any request/stream error the connection is closed.  NOTE(review):
    __stop is set to True here, so despite the "Retrying" log messages the
    while loop exits after the sleep -- looks unintended, confirm.
    """
    while not self.__stop:
        try:
            # TODO: Connection timeout is too long.
            self.__response = requests.get(
                self.__url, params=self.__params, stream=True,
                auth=get_auth_info())
            for raw_line in self.__response.iter_lines():
                line = self.__sanitize(raw_line)
                if not line:
                    continue
                change = self.__parse_change(line)
                if not change:
                    continue
                yield change
        # `except Exception:` replaces the Python-2-only `except Exception, e`.
        except Exception:
            import traceback
            logger.debug(traceback.format_exc())
            # Close everything but keep retrying
            self.stop()
            self.__stop = True
            logger.warning(u"Lost connection to CouchDB. Retrying in 5 seconds...")
            time.sleep(5)
            logger.info(u"Retrying...")
def upload_views(workspace):
    """Upload views with couchdb behind of ViewsManager.

    Best-effort: failures are logged and never raised (uploading requires
    admin rights).
    """
    vmanager = ViewsManager()
    try:
        vmanager.addViews(workspace)
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed; still best-effort for real errors.
        import traceback
        logger.debug(traceback.format_exc())
        logger.warning("Views documents couldn't be uploaded. You need to be an admin to do it")
def get_config(config_path):
    """Load application.conf, searching the standard lookup locations.

    :param config_path: explicit path handed through to Config.load
    :returns: the loaded Config object (its items are dumped to debug log)
    """
    search_dirs = [os.curdir, expanduser('~'), '/etc/', dirname(__file__)]
    conf = Config.load(config_path,
                       conf_name='application.conf',
                       lookup_paths=search_dirs)
    logger.debug(json.dumps(conf.items, indent=4))
    return conf
def unregister(self, client_to_unregister):
    """Search for the client_to_unregister in all workspaces.

    Removes the first matching registration found, then returns.
    """
    for ws_name, ws_clients in self.workspace_clients.items():
        for candidate in ws_clients:
            if candidate != client_to_unregister:
                continue
            logger.debug("unregistered client from workspace {0}".format(ws_name))
            self.leave_workspace(candidate, ws_name)
            return
def push_reports():
    """Create/open the 'reports' DB in CouchDB and push the report views.

    Best-effort: failures are logged and never raised (requires admin rights).
    """
    vmanager = ViewsManager()
    try:
        logger.debug(u'Pushing Reports DB into CouchDB')
        couchdb_server = CouchDBServer()
        workspace = couchdb_server.get_or_create_db('reports')
        vmanager.addView(config.REPORTS_VIEWS_DIR, workspace)
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed.
        import traceback
        logger.debug(traceback.format_exc())
        logger.warning("Reports database couldn't be uploaded. You need to be an admin to do it")
def create_by_dict(cls, role, post_data):
    """Create a LoginModel row from registration post data.

    Username defaults to '<first><last>@<role>.hms.com' when no email is
    supplied; the password is the last name plus a random 6-hex-char suffix.
    """
    logger.debug("str(uuid.uuid4().hex[0:6]:{}".format(str(uuid.uuid4().hex[0:6])))
    default_email = "{}@{}.hms.com".format(
        post_data.get("first_name") + post_data.get("last_name"), role)
    return LoginModel.create(
        # change after dev
        username=post_data.get("email", default_email),
        role=role,
        password=post_data.get("last_name") + str(uuid.uuid4().hex[0:6]),
    )
def onConnect(self, request):
    """Accept the websocket connection, parsing any cookie header.

    :returns: (protocol, headers) -- both left empty; malformed cookies
              are deliberately ignored
    """
    protocol = None
    headers = {}
    # see if there already is a cookie set ..
    logger.debug('Websocket request {0}'.format(request))
    if 'cookie' in request.headers:
        try:
            jar = Cookie.SimpleCookie()
            jar.load(str(request.headers['cookie']))
        except Cookie.CookieError:
            pass
    return (protocol, headers)
def upload_views(workspace):
    """Upload views with couchdb behind of ViewsManager.

    Best-effort: failures are logged and never raised (uploading requires
    admin rights).
    """
    vmanager = ViewsManager()
    try:
        vmanager.addViews(workspace)
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed.
        import traceback
        logger.debug(traceback.format_exc())
        logger.warning(
            "Views documents couldn't be uploaded. You need to be an admin to do it"
        )
def get_data(addr):
    """Fetch the value stored under `addr` from redis.

    :param addr: redis key
    :returns: the value decoded as UTF-8, or the raw falsy result
              (None / empty) when nothing usable is stored
    """
    logger.debug("redis get_data, addr:{}".format(addr))
    client = redis.Redis(host=ADDR)
    raw = client.get(addr)
    if not raw:
        # Missing key (or empty value) is returned as-is, undecoded.
        return raw
    return raw.decode("utf-8")
def unregister(self, client_to_unregister):
    """Search for the client_to_unregister in all workspaces.

    Removes the first matching registration found, then returns.
    """
    for ws_name, ws_clients in self.workspace_clients.items():
        for candidate in ws_clients:
            if candidate != client_to_unregister:
                continue
            logger.debug(
                "unregistered client from workspace {0}".format(ws_name))
            self.leave_workspace(candidate, ws_name)
            return
def push_reports():
    """Create/open the 'reports' DB in CouchDB and push the report views.

    Best-effort: failures are logged and never raised (requires admin rights).
    """
    vmanager = ViewsManager()
    try:
        logger.debug(u'Pushing Reports DB into CouchDB')
        couchdb_server = CouchDBServer()
        workspace = couchdb_server.get_or_create_db('reports')
        vmanager.addView(config.REPORTS_VIEWS_DIR, workspace)
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed.
        import traceback
        logger.debug(traceback.format_exc())
        logger.warning(
            "Reports database couldn't be uploaded. You need to be an admin to do it"
        )
def start_changes_monitor(self, changes_callback, last_seq=0):
    """Launch a daemon thread that follows this workspace's change feed.

    :param changes_callback: callable invoked for each change document
    :param last_seq: CouchDB sequence number to resume from (default 0)
    """
    logger.debug(
        u'Starting changes monitor for workspace {} since {}'.format(
            self.__ws_name, last_seq))
    stream = ChangesStream(self.__ws_name,
                           feed='continuous',
                           since=last_seq,
                           include_docs='true',
                           heartbeat='true')
    monitor = ChangesMonitorThread(stream, changes_callback)
    # Daemonize so the monitor never blocks interpreter shutdown.
    monitor.daemon = True
    self.__changes_monitor_thread = monitor
    self.__changes_monitor_thread.start()
def _create_and_import_db(db_conn, couchdb_conn):
    """Create a fresh database and import a CouchDB workspace into it.

    CouchDB's last sequence number is stored before importing so the changes
    monitor can later resume from a consistent point.  On any import failure
    the half-built database is deleted and the exception re-raised.
    """
    db_conn.create()
    db_conf = server.database.Configuration(db_conn)
    db_conf.set_last_seq(couchdb_conn.get_last_seq())
    try:
        _import_from_couchdb(db_conn, couchdb_conn)
    # `as e` replaces the Python-2-only `except Exception, e`.
    except Exception as e:
        import traceback
        logger.debug(traceback.format_exc())
        logger.error(u'Error while importing workspace {}: {!s}'.format(
            db_conn.db_name, e))
        db_conn.delete()
        # Bare raise preserves the original traceback (`raise e` truncated it).
        raise
def make_appointment(post_data):
    """make_appointment in the system. The post data is in json format.

    1. check if patient and doctor exist in db
    2. check if the appointment exist in redis
    3. make appointment if 1 and 2 ok
       3.2 add the appointment to the doctor's that day's schedule
    4. return if appointment exists, with reason if fail

    (The original docstring also contained accidentally pasted dead code;
    it has been removed.)

    :param post_data: dict
    :returns: a status, a str( appointment's url on success,
              err info on failure)
    """
    try:
        logger.debug('in make_appointment')
        # check db when patient is ok
        appointment_key = (post_data['doctorid'] + '/' +
                           post_data['datetimeslot'] + '/' +
                           post_data['patientid'])
        rediscli.set_data(appointment_key, post_data)
        # The doctor's per-day schedule lives under <doctorid>/<YYYYMMDD>;
        # the slot suffix (chars 8+) maps to '1' when booked.
        day_key = post_data['doctorid'] + '/' + post_data['datetimeslot'][:8]
        schedule = rediscli.get_data(day_key)
        if schedule:
            schedule = ast.literal_eval(schedule)
            schedule[post_data['datetimeslot'][8:]] = '1'
            rediscli.set_data(day_key, json.dumps(schedule))
        else:
            schedule = {post_data['datetimeslot'][8:]: '1'}
            logger.debug('in make_appointment, schedule:{}'.format(json.dumps(schedule)))
            rediscli.set_data(day_key, json.dumps(schedule))
    except Exception as ex:
        # %s placeholder so the exception text reaches the log.
        logger.error('Exception: %s', ex)
        return 0, 'make_appointment failed, did not make_appointment'
    else:
        return 1, str(post_data['doctorid'] + '/' + post_data['datetimeslot'] +
                      '/' + post_data['patientid'])
def create_by_dict(cls, patientid, doctorid, post_data):
    """Create a CommentModel row for the given patient/doctor pair.

    comment_id is '<patientid>-<doctorid>-<datetime>'; the timestamp
    defaults to the current time (YYYYMMDDHHMMSS) when absent.
    """
    patient = PatientModel.get(PatientModel.email == patientid)
    logger.debug(type(datetime.datetime.now().strftime("%Y%m%d%H%M%S")))
    stamp = post_data.get("datetime",
                          datetime.datetime.now().strftime("%Y%m%d%H%M%S"))
    return CommentModel.create(
        patient=patient,
        response_doctor=doctorid,
        comment_id="-".join([patientid, doctorid, stamp]),
        datetime=post_data.get("datetime",
                               datetime.datetime.now().strftime("%Y%m%d%H%M%S")),
        comment=post_data.get("comment"),
    )
def create_by_dict(cls, patientid, post_data):
    """Create an ObjectModel row describing an uploaded object.

    objid is '<patientid>-<objname>-<datetime>'; the timestamp defaults
    to the current time (YYYYMMDDHHMMSS) when absent.
    """
    patient = PatientModel.get(PatientModel.email == patientid)
    logger.debug(type(datetime.datetime.now().strftime("%Y%m%d%H%M%S")))
    stamp = post_data.get("datetime",
                          datetime.datetime.now().strftime("%Y%m%d%H%M%S"))
    return ObjectModel.create(
        patient=patient,
        objid="-".join([patientid, post_data.get("objname"), stamp]),
        objname=post_data.get("objname"),
        description=post_data.get("description", "p"),
        datetime=post_data.get("datetime",
                               datetime.datetime.now().strftime("%Y%m%d%H%M%S")),
    )
def test_auth_success(self):
    """Successful admin authentication returns status 1 and the token."""
    logger.debug('in test_auth_success')
    adminid = 'admin_{}'.format(str(uuid.uuid4()))
    LoginModel.create(username=adminid, password='******', role='admin')
    credentials = {
        'username': adminid,
        'password': '******',
    }
    # Stub out token generation so the test is deterministic.
    auth.get_token = mock.Mock(return_value='admintoken')
    self.assertEqual(auth.authentication('admin', credentials)[0], 1)
    self.assertEqual(auth.authentication('admin', credentials)[1], 'admintoken')
def update_entity_from_doc(self, document):
    """Update the stored entity matching this CouchDB document, if any.

    ISSUES:
      * Updated relationships are not taken into account.
        Status: TODO

    :returns: True when an entity was updated and committed, False when
              no matching entity exists in the database
    """
    doc_id = document.get('_id')
    entity = self.__get_modified_entity(doc_id)
    # Guard clause: nothing to update for unknown documents.
    if entity is None:
        logger.debug(u'Document ({}) was not present in database to update'.format(doc_id))
        return False
    entity.update_from_document(document)
    entity.entity_metadata.update_from_document(document)
    self.__db_conn.session.commit()
    logger.info(u'A {} ({}) was updated in Workspace {}'.format(
        entity.entity_metadata.document_type,
        getattr(entity, 'name', '<no-name>'),
        self.__db_conn.db_name))
    return True
def test_auth_fail(self):
    """Wrong password and wrong role must both fail authentication."""
    logger.debug('in test_auth_success fail')
    adminid = 'admin_{}'.format(str(uuid.uuid4()))
    LoginModel.create(username=adminid, password='******', role='admin')
    credentials = {
        'username': adminid,
        'password': '******',
    }
    # Stub out token generation so the test is deterministic.
    auth.get_token = mock.Mock(return_value='admintoken')
    self.assertEqual(auth.authentication('admin', credentials)[0], 0)
    self.assertEqual(auth.authentication('admin', credentials)[1],
                     'auth failed, password not match')
    self.assertEqual(auth.authentication('doctor', credentials)[0], 0)
    self.assertEqual(auth.authentication('doctor', credentials)[1], 'auth failed')
def create_by_dict(cls, patientid, doctorid, post_data):
    """Create a PrescriptionModel row for the given patient.

    drug_id is '<patientid>-<drug_name>-<datetime>'; the timestamp defaults
    to the current time (YYYYMMDDHHMMSS) when absent.
    """
    patient = PatientModel.get(PatientModel.email == patientid)
    logger.debug(type(datetime.datetime.now().strftime("%Y%m%d%H%M%S")))
    stamp = post_data.get("datetime",
                          datetime.datetime.now().strftime("%Y%m%d%H%M%S"))
    return PrescriptionModel.create(
        patient=patient,
        response_doctor=doctorid,
        drug_id="-".join([patientid, post_data.get("drug_name"), stamp]),
        datetime=post_data.get("datetime",
                               datetime.datetime.now().strftime("%Y%m%d%H%M%S")),
        drug_name=post_data.get("drug_name"),
        after_meal=post_data.get("after_meal"),
        amount=post_data.get("amount"),
        dosage_per_day=post_data.get("dosage_per_day"),
        description=post_data.get("description", ""),
    )
def run(self):
    """Consume the change stream, dispatching each change to the callback.

    Failing changes are logged and skipped.  An 'unauthorized' error aborts
    the whole process; a 'no_db_file' reason stops the stream and exits.
    """
    for change_doc in self.__stream:
        try:
            self.__changes_callback(self.CHANGE_CLS(change_doc))
        # `except Exception:` replaces the Python-2-only `except Exception, e`.
        except Exception:
            import traceback
            logger.debug(traceback.format_exc())
            logger.warning(u"Error while processing change. Ignoring. Offending change: {}".format(change_doc))
        if change_doc.get('error', None):
            if change_doc.get('error') == 'unauthorized':
                logger.error(u"Unauthorized access to CouchDB. Make sure faraday-server's"
                             " configuration file has CouchDB admin's credentials set")
                thread.interrupt_main()  # TODO: A proper fix is needed here
            elif change_doc.get('reason') == 'no_db_file':
                self.__stream.stop()
                break
def update_entity_from_doc(self, document):
    """Update the stored entity matching this CouchDB document, if any.

    ISSUES:
      * Updated relationships are not taken into account.
        Status: TODO

    :returns: True when an entity was updated and committed, False when
              no matching entity exists in the database
    """
    doc_id = document.get('_id')
    entity = self.__get_modified_entity(doc_id)
    # Guard clause: nothing to update for unknown documents.
    if entity is None:
        logger.debug(
            u'Document ({}) was not present in database to update'.format(
                doc_id))
        return False
    entity.update_from_document(document)
    entity.entity_metadata.update_from_document(document)
    self.__db_conn.session.commit()
    logger.info(u'A {} ({}) was updated in Workspace {}'.format(
        entity.entity_metadata.document_type,
        getattr(entity, 'name', '<no-name>'),
        self.__db_conn.db_name))
    return True
def create_by_dict(cls, patientid, doctorid, post_data):
    """Create a DischargeModel row for the given patient/doctor pair.

    discharge_id is '<patientid>-<doctorid>-<indate>'; timestamps default
    to the current time when absent from post_data.
    """
    patient = PatientModel.get(PatientModel.email == patientid)
    logger.debug(type(datetime.datetime.now().strftime("%Y%m%d%H%M%S")))
    in_stamp = post_data.get("indate",
                             datetime.datetime.now().strftime("%Y%m%d%H%M%S"))
    return DischargeModel.create(
        patient=patient,
        response_doctor=doctorid,
        discharge_id="-".join([patientid, doctorid, in_stamp]),
        datetime=post_data.get("datetime",
                               datetime.datetime.now().strftime("%Y%m%d%H%M%S")),
        indate=post_data.get("indate",
                             datetime.datetime.now().strftime("%Y%m%d")),
        room=post_data.get("room", "not assigned"),
        bed=post_data.get("bed", "not assigned"),
        outdate=post_data.get("outdate", "not yet"),
        description=post_data.get("description", "no description"),
    )
def __iter__(self):
    """Yield parsed change documents from the continuous CouchDB feed,
    retrying on connection loss unless stop() was requested."""
    while not self.__stop:
        try:
            # TODO: Connection timeout is too long.
            self.__response = requests.get(self.__url,
                                           params=self.__params,
                                           stream=True,
                                           auth=get_auth_info())
            for raw_line in self.__response.iter_lines():
                if self.__stop:
                    break
                line = self.__sanitize(raw_line)
                if not line:
                    continue
                change = self.__parse_change(line)
                if not change:
                    continue
                yield change
        # `except Exception:` replaces the Python-2-only `except Exception, e`.
        except Exception:
            # On workspace deletion, requests will probably
            # fail to perform the request or the connection
            # will be closed. Check if this was intentional
            # by checking on the __stop flag.
            if self.__stop:
                break
            import traceback
            logger.debug(traceback.format_exc())
            # Close everything but keep retrying
            self.stop()
            self.__stop = False
            logger.warning(
                u"Lost connection to CouchDB. Retrying in 3 seconds...")
            time.sleep(3)
            logger.info(u"Retrying...")
def upload_obj(patientid, post_data):
    """Upload an obj in the system. The post data is a dict.

    Registers the object in the database and returns the swift storage
    URL plus auth token the client should PUT the object bytes to.

    :param patientid: patient identifier (email)
    :param post_data: dict with at least 'objname' and 'datetime'
    :returns: (1, resp_dict) on success, (0, {'errinfo': ...}) on failure
    """
    obj = ''
    try:
        logger.debug('in upload_obj')
        # NOTE(review): logging the swift password is a security risk --
        # consider removing this debug line.
        logger.debug('auth_url:{}, account_username:{}, password:{}'.format(
            conf.auth_url, conf.account_username, conf.password))
        resp_dict = {}
        storage_url, auth_token = swiftclient.client.get_auth(
            conf.auth_url, conf.account_username, conf.password,
            auth_version=conf.auth_version)
        resp_dict['auth_token'] = auth_token
        resp_dict['storage_url'] = storage_url + '/' + \
            conf.container + '/' + patientid + '/' + \
            patientid + '-' + post_data['objname'] + '-' + post_data['datetime']
        with database.atomic():
            obj = ObjectModel.create_by_dict(patientid, post_data)
            logger.debug(obj)
            logger.debug('in database.atomic')
    except Exception as ex:
        # %s placeholder so the exception text reaches the log.
        logger.error('Exception: %s', ex)
        # Roll back the DB record if it was created before the failure.
        q = ObjectModel.delete().where(
            ObjectModel.objid == patientid + '-' + post_data['objname'] +
            '-' + post_data['datetime'])
        q.execute()
        return 0, {'errinfo': 'create obj failed, did not create obj'}
    else:
        resp_dict['info'] = 'make PUT request to storage_url with auth_token as "x-storage-token" in headers'
        return 1, resp_dict
def process_change(self, change):
    """Apply one CouchDB change: route it to delete/update/add handling,
    persist its sequence number, and fire the optional post-processing
    callback.

    :param change: a change object exposing deleted/updated/added flags,
                   doc, doc_id, seq and change_doc
    """
    logger.debug(u'New change for {}: {}'.format(self.__db_conn.db_name, change.change_doc))
    if change.deleted:
        logger.debug(u'Doc {} was deleted'.format(change.doc_id))
        self.delete_entity_from_doc_id(change.doc['_id'])
    elif change.updated:
        logger.debug(u'Doc {} was updated'.format(change.doc_id))
        self.update_entity_from_doc(change.doc)
    elif change.added:
        # add_entity_from_doc reports whether the doc was actually stored.
        if self.add_entity_from_doc(change.doc):
            logger.debug(u'Doc {} was added'.format(change.doc_id))
        else:
            logger.debug(u"Doc {} was not added".format(change.doc_id))
    # Persist progress so a restart resumes from this sequence number.
    if change.seq is not None:
        self.__db_conf.set_last_seq(change.seq)
    if self.__post_processing_change_cbk:
        self.__post_processing_change_cbk(change)
def get_discharges(patientid):
    """Return all discharge records for a patient.

    :param patientid: patient's email
    :returns: (1, list of discharge dicts) on success,
              (0, {'errinfo': ...}) on failure
    """
    logger.debug('in get_discharges')
    resp_list = []
    try:
        patient = PatientModel.get(PatientModel.email == patientid)
        for discharge in DischargeModel.select().where(DischargeModel.patient == patient):
            logger.debug('discharge_id: %s, indate: %s' % (discharge.discharge_id, discharge.indate))
            resp_list.append({
                'indate': discharge.indate,
                'outdate': discharge.outdate,
                'response_doctor': discharge.response_doctor,
                'description': discharge.description,
                'datetime': discharge.datetime,
            })
        logger.debug('discharges:{}'.format(resp_list))
    except Exception as ex:
        # %s placeholder so the exception text reaches the log.
        logger.error('Exception: %s', ex)
        return 0, {'errinfo': 'get discharges failed'}
    else:
        return 1, resp_list
def __iter__(self):
    """Yield parsed change documents from the continuous CouchDB feed,
    retrying on connection loss unless stop() was requested."""
    while not self.__stop:
        try:
            # TODO: Connection timeout is too long.
            self.__response = requests.get(
                self.__url, params=self.__params, stream=True,
                auth=get_auth_info())
            for raw_line in self.__response.iter_lines():
                if self.__stop:
                    break
                line = self.__sanitize(raw_line)
                if not line:
                    continue
                change = self.__parse_change(line)
                if not change:
                    continue
                yield change
        # `except Exception:` replaces the Python-2-only `except Exception, e`.
        except Exception:
            # On workspace deletion, requests will probably
            # fail to perform the request or the connection
            # will be closed. Check if this was intentional
            # by checking on the __stop flag.
            if self.__stop:
                break
            import traceback
            logger.debug(traceback.format_exc())
            # Close everything but keep retrying
            self.stop()
            self.__stop = False
            logger.warning(u"Lost connection to CouchDB. Retrying in 3 seconds...")
            time.sleep(3)
            logger.info(u"Retrying...")
def get_doctors():
    """Return id and name info for every doctor in the system.

    :returns: (1, list of doctor dicts) on success,
              (0, {'errinfo': ...}) on failure
    """
    logger.debug('in get_doctors')
    resp_list = []
    try:
        # Leftover debug print() calls removed; logging covers this.
        for doc in DoctorModel.select():
            logger.debug('docid: %s' % (doc))
            resp_list.append({
                'doctorid': doc.email,
                'last_name': doc.last_name,
                'first_name': doc.first_name,
            })
        logger.debug('doctors:{}'.format(resp_list))
    except Exception as ex:
        # %s placeholder so the exception text reaches the log.
        logger.error('Exception: %s', ex)
        return 0, {'errinfo': 'get doctors failed'}
    else:
        return 1, resp_list
def get_objs(patientid):
    """Return metadata for all uploaded objects of a patient.

    :param patientid: patient's email
    :returns: (1, list of object dicts) on success,
              (0, {'errinfo': ...}) on failure
    """
    logger.debug('in get_objs')
    resp_list = []
    try:
        patient = PatientModel.get(PatientModel.email == patientid)
        for obj in ObjectModel.select().where(ObjectModel.patient == patient):
            logger.debug('objid: %s, descrip: %s' % (obj.objid, obj.description))
            resp_list.append({
                'objid': obj.objid,
                'objname': obj.objname,
                'description': obj.description,
                'datetime': obj.datetime,
            })
        logger.debug('objs:{}'.format(resp_list))
    except Exception as ex:
        # %s placeholder so the exception text reaches the log.
        logger.error('Exception: %s', ex)
        return 0, {'errinfo': 'get objs failed'}
    else:
        return 1, resp_list
def get_comments(patientid):
    """Return all doctor comments for a patient.

    :param patientid: patient's email
    :returns: (1, list of comment dicts) on success,
              (0, {'errinfo': ...}) on failure
    """
    logger.debug('in get_comments')
    resp_list = []
    try:
        patient = PatientModel.get(PatientModel.email == patientid)
        for comment in CommentModel.select().where(CommentModel.patient == patient):
            logger.debug('comment_id: %s, comment: %s' % (comment.comment_id, comment.comment))
            resp_list.append({
                'comment': comment.comment,
                'datetime': comment.datetime,
                'response_doctor': comment.response_doctor,
            })
        logger.debug('comments:{}'.format(resp_list))
    except Exception as ex:
        # %s placeholder so the exception text reaches the log.
        logger.error('Exception: %s', ex)
        return 0, {'errinfo': 'get comments failed'}
    else:
        return 1, resp_list
def authentication(role, post_data):
    """Authenticate a user for the given role and issue a token.

    :param role: role name to match against the stored login record
    :param post_data: dict with 'username' and 'password'
    :returns: (1, token) on success, (0, error message) on failure
    """
    try:
        logger.debug('in authentication')
        user = LoginModel.get(LoginModel.username == post_data['username'],
                              LoginModel.role == role)
    except Exception as ex:
        # %s placeholder so the exception text reaches the log.
        logger.error('Exception: %s', ex)
        return 0, 'auth failed'
    else:
        # NOTE(review): logging plaintext passwords is a security risk --
        # consider removing this debug line.
        logger.debug("user.password:{}, post_data['password']:{}".format(
            user.password, post_data['password']))
        if user.password == post_data['password']:
            try:
                token = get_token(post_data['username'], role)
            except Exception as ex:
                logger.error('Exception: %s', ex)
                return 0, 'auth failed, get token failed'
            else:
                logger.debug(token)
                return 1, token
        else:
            return 0, 'auth failed, password not match'
def get_prescriptions(patientid):
    """Return all prescriptions for a patient.

    :param patientid: patient's email
    :returns: (1, list of prescription dicts) on success,
              (0, {'errinfo': ...}) on failure
    """
    logger.debug('in get_prescriptions')
    resp_list = []
    try:
        patient = PatientModel.get(PatientModel.email == patientid)
        for prescription in PrescriptionModel.select().where(PrescriptionModel.patient == patient):
            logger.debug('drug_id: %s, descrip: %s' % (prescription.drug_id, prescription.description))
            resp_list.append({
                'drug_name': prescription.drug_name,
                'after_meal': prescription.after_meal,
                'description': prescription.description,
                'datetime': prescription.datetime,
                'amount': prescription.amount,
                'dosage_per_day': prescription.dosage_per_day,
                'response_doctor': prescription.response_doctor,
            })
        logger.debug('prescriptions:{}'.format(resp_list))
    except Exception as ex:
        # %s placeholder so the exception text reaches the log.
        logger.error('Exception: %s', ex)
        return 0, {'errinfo': 'get prescriptions failed'}
    else:
        return 1, resp_list
def check_appointment(doctorid, date):
    """Get info of appointments for a doctor on a date in the system.

    :param doctorid: doctor's uid
    :param date: date string (YYYYMMDD) keyed with doctorid in redis
    :returns: (1, schedule) on success -- an empty schedule when the doctor
              has no appointments that day -- or (0, error message)
    """
    try:
        logger.debug('in check_appointment')
        schedule = rediscli.get_data(doctorid + '/' + date)
    except Exception as ex:
        # %s placeholder so the exception text reaches the log.
        logger.error('Exception: %s', ex)
        return 0, 'check_appointment failed'
    else:
        # The original logged this line twice in a row; once is enough.
        logger.debug('in check_appointment schedule data:{}'.format(schedule))
        if schedule:
            return 1, compatible(schedule)
        # no appointment for this doctor on this date
        return 1, compatible('{}')
def __wait_until_database_is_sync(self, timeout):
    """
    This function will establish a milestone by asking CouchDB's last
    update sequence number to then wait for an event signal from the
    changes monitor when its last procesed change is newer or as new
    as this milestone.

    If synchronization isn't achieved in <timeout> seconds it will
    return False, communicating that data consistency can be ensured
    after this call.
    """
    # Record CouchDB's current last-seq as the target to catch up to.
    self.__set_sync_milestone()
    logger.debug(
        u"Waiting until synchronization with CouchDB (ws: {}, couchdb: {})"
        .format(self.__last_seq, self.__sync_seq_milestone))
    # Block until the changes monitor signals the event or timeout elapses.
    self.__data_sync_event.wait(timeout)
    # Re-check is_set() rather than trusting wait()'s return value.
    is_sync = self.__data_sync_event.is_set()
    if is_sync:
        logger.debug(u"Synchronized with CouchDB to seq {}".format(
            self.__last_seq))
    else:
        logger.debug(
            u"Synchronization timed out. Working with outdated database")
    return is_sync
def __process_change(self, change):
    """Route one change document to the delete/update/add handler, then
    record its sequence number so processing can resume after a restart.

    :param change: a change object exposing deleted/updated/added flags,
                   doc_id, seq and change_doc
    """
    logger.debug(u'New change for {}: {}'.format(self.__workspace, change.change_doc))
    if change.deleted:
        logger.debug(u'Doc {} was deleted'.format(change.doc_id))
        self.__process_del(change)
    elif change.updated:
        logger.debug(u'Doc {} was updated'.format(change.doc_id))
        self.__process_update(change)
    elif change.added:
        logger.debug(u'Doc {} was added'.format(change.doc_id))
        self.__process_add(change)
    # Persist progress regardless of which branch (if any) handled the doc.
    self.__update_last_seq(change)
def create_database(self):
    """Create and populate this workspace's database from CouchDB.

    Stores the CouchDB last-seq and schema metadata first, imports all
    documents, and only then flips the migration flag to True, so an
    interrupted import is detectable.  On failure the database is deleted
    and the exception re-raised.
    """
    logger.info(u'Creating database for workspace {}'.format(
        self.__workspace))
    self.database.create()
    self.database.open_session()
    try:
        # Add metadata information to database
        self.set_last_seq(self.couchdb.get_last_seq())
        self.set_migration_status(False)
        self.set_schema_version()
        self.import_from_couchdb()
        # Reaching this far without errors means a successful migration
        self.set_migration_status(True)
    # `as e` replaces the Python-2-only `except Exception, e`.
    except Exception as e:
        import traceback
        logger.debug(traceback.format_exc())
        logger.error(u'Error while importing workspace {}: {!s}'.format(
            self.__workspace, e))
        self.delete()
        # Bare raise preserves the original traceback (`raise e` truncated it).
        raise
def delete_entity_from_doc_id(self, document_id):
    """Delete the stored entity matching this CouchDB document id, if any.

    ISSUES:
      * Delete child entities. Have not found cases where this is a
        problem. So far, clients are deleting all CouchDBs documents
        properly, and if they don't, the DBs still are consistent.
        Maybe use SQLAlchemy's cascades if this become a problem.
        Status: Somewhat OK
      * Doc ID maps to multiple elements. This could happen since the ID
        is a hash based in a few entity's properties which can be
        replicated. Status: TODO

    :returns: True when an entity was deleted and committed, False when
              no matching entity exists in the database
    """
    entity = self.__get_modified_entity(document_id)
    # Guard clause: nothing to delete for unknown documents.
    if entity is None:
        logger.debug(
            u'Document ({}) was not present in database to delete'.format(
                document_id))
        return False
    self.__db_conn.session.delete(entity)
    self.__db_conn.session.commit()
    logger.info(u'A {} ({}) was deleted in Workspace {}'.format(
        entity.entity_metadata.document_type,
        getattr(entity, 'name', '<no-name>'),
        self.__db_conn.db_name))
    return True
def join_workspace(self, client, workspace):
    """Register `client` in `workspace` unless it is already a member."""
    logger.debug('Join workspace {0}'.format(workspace))
    members = self.workspace_clients[workspace]
    if client not in members:
        logger.debug("registered client {}".format(client.peer))
        members.append(client)
def leave_workspace(self, client, workspace_name):
    """Remove `client` from the given workspace's client list.

    Propagates ValueError if the client is not registered there
    (standard list.remove behavior).
    """
    logger.debug('Leave workspace {0}'.format(workspace_name))
    self.workspace_clients[workspace_name].remove(client)
def after_cursor_execute(conn, cursor, statement, parameters, context, executemany):
    """SQLAlchemy hook: log the elapsed time of the completed query.

    Relies on _query_start_time stamped onto the execution context by the
    matching before_cursor_execute hook.
    """
    elapsed = time.time() - context._query_start_time
    logger.debug(u"Query Complete. Total Time: {:.02f}ms".format(elapsed * 1000))
def before_cursor_execute(conn, cursor, statement, parameters, context, executemany):
    """SQLAlchemy hook: stamp the query start time and log the statement.

    The timestamp is read back by the matching after_cursor_execute hook.
    """
    context._query_start_time = time.time()
    logger.debug(u"Start Query:\n{}".format(statement))
    logger.debug(u"Parameters:\n{!r}".format(parameters))
def __start_database_synchronization(self):
    """Resume the CouchDB changes monitor from the stored last-seq."""
    self.__last_seq = self.get_last_seq()
    logger.debug(u'Workspace {} last update: {}'.format(
        self.__workspace, self.__last_seq))
    self.couchdb.start_changes_monitor(self.__process_change,
                                       last_seq=self.__last_seq)