def host_power_operation():
    logger.debug("\nIn host power operation function\n-----------------------------------\n")
    livehosts = current.db(current.db.host.status == HOST_STATUS_UP).select()
    freehosts = []
    try:
        # Collect the host rows (not just IPs) so we can update their records later
        for host_data in livehosts:
            if not has_running_vm(host_data.host_ip.private_ip):
                freehosts.append(host_data)
        freehostscount = len(freehosts)
        if freehostscount == 2:
            logger.debug("Everything is Balanced. Green Cloud :)")
        elif freehostscount < 2:
            logger.debug("Urgently needed " + str(2 - freehostscount) + " more live hosts.")
            # Select only shut-off hosts
            newhosts = current.db(current.db.host.status == HOST_STATUS_DOWN).select()[0:(2 - freehostscount)]
            for host_data in newhosts:
                logger.debug("Sending magic packet to " + host_data.host_name)
                host_power_up(host_data)
        elif freehostscount > 2:
            logger.debug("Sending shutdown signal to total " + str(freehostscount - 2) + " no. of host(s)")
            extrahosts = freehosts[2:]
            for host_data in extrahosts:
                logger.debug("Moving any dead vms to first running host")
                migrate_all_vms_from_host(host_data.host_ip.private_ip)
                logger.debug("Sending kill signal to " + host_data.host_ip.private_ip)
                commands.getstatusoutput("ssh root@" + host_data.host_ip.private_ip + " shutdown -h now")
                host_data.update_record(status=HOST_STATUS_DOWN)
    except:
        log_exception()
    return

def clear_all_timedout_vnc_mappings():
    # Get all active VNC mappings from DB
    current.db("FLUSH QUERY CACHE")
    vnc_mappings = current.db((current.db.vnc_access.status == VNC_ACCESS_STATUS_ACTIVE) &
                              (current.db.vnc_access.expiry_time < get_datetime())).select()
    if vnc_mappings:
        for mapping in vnc_mappings:
            logger.debug('Removing VNC mapping for vm id: %s, host: %s, token: %s, source port: %s, destination port: %s'
                         % (mapping.vm_id, mapping.host_id, mapping.token,
                            mapping.vnc_source_port, mapping.vnc_destination_port))
            # Rewrite the token list, dropping the lines for the expired token
            f = open("/home/www-data/token.list", "r")
            lines = f.readlines()
            f.close()
            f = open("/home/www-data/token.list", "w")
            token = mapping.token
            logger.debug("token is : " + str(token))
            logger.debug("token type is : " + str(type(token)))
            for line in lines:
                if token not in line:
                    logger.debug("lines are : " + str(line))
                    f.write(line)
            f.close()
            current.db(current.db.vnc_access.id == mapping.id).delete()
            current.db.commit()
        logger.debug("Done clearing novnc mappings")
    else:
        raise Exception("NAT type is not supported")

def _options(fieldname):
    """
        Lookup the full set of options for a Filter Widget
        - for Subscriptions we don't want to see just the options
          available in current data
    """
    if fieldname == "event_type_id":
        T = current.T
        etable = current.s3db.event_event_type
        rows = current.db(etable.deleted == False).select(etable.id, etable.name)
        options = {}
        for row in rows:
            options[row.id] = T(row.name)
    elif fieldname == "priority":
        T = current.T
        wptable = current.s3db.cap_warning_priority
        rows = current.db(wptable.deleted == False).select(wptable.id, wptable.name)
        options = {}
        for row in rows:
            options[row.id] = T(row.name)
    elif fieldname == "location_id":
        ltable = current.s3db.gis_location
        query = (ltable.deleted == False)
        # IDs converted inside widget's _options() function
        rows = current.db(query).select(ltable.id)
        options = [row.id for row in rows]
    return options

def latest_records(resource, layout, listid, limit, list_fields, orderby):
    """
        Display a dataList of the latest records for a resource
    """
    #orderby = resource.table[orderby]
    datalist, numrows, ids = resource.datalist(fields=list_fields,
                                               start=None,
                                               limit=limit,
                                               listid=listid,
                                               orderby=orderby,
                                               layout=layout)
    if numrows == 0:
        # Empty table or just no match?
        table = resource.table
        if "deleted" in table:
            available_records = current.db(table.deleted != True)
        else:
            available_records = current.db(table._id > 0)
        if available_records.select(table._id, limitby=(0, 1)).first():
            msg = DIV(S3CRUD.crud_string(resource.tablename, "msg_no_match"),
                      _class="empty")
        else:
            msg = DIV(S3CRUD.crud_string(resource.tablename, "msg_list_empty"),
                      _class="empty")
        data = msg
    else:
        # Render the list
        dl = datalist.html()
        data = dl
    return data

def __call__(self):
    request = current.request
    response = current.response

    view = path.join(request.folder, "private", "templates", "AidIQ", "views", "index.html")
    try:
        # Pass view as file not str to work in compiled mode
        response.view = open(view, "rb")
    except IOError:
        from gluon.http import HTTP
        raise HTTP("404", "Unable to open Custom View: %s" % view)

    T = current.T
    table = current.s3db.cms_post
    page = request.get_vars.get("page", None)
    if page:
        vars = {"page": page}
        row = current.db(table.name == page).select(table.id,
                                                    table.title,
                                                    table.body,
                                                    limitby=(0, 1)).first()
    else:
        module = "default"
        vars = {"module": module}
        row = current.db(table.module == module).select(table.id,
                                                        table.title,
                                                        table.body,
                                                        limitby=(0, 1)).first()
    title = None
    if row:
        title = row.title
        if current.auth.s3_has_role(current.session.s3.system_roles.ADMIN):
            item = DIV(XML(row.body),
                       BR(),
                       A(T("Edit"),
                         _href=URL(c="cms", f="post", args=[row.id, "update"], vars=vars),
                         _class="action-btn"))
        else:
            item = XML(row.body)
    elif current.auth.s3_has_role(current.session.s3.system_roles.ADMIN):
        item = A(T("Edit"),
                 _href=URL(c="cms", f="post", args="create", vars=vars),
                 _class="action-btn")
    else:
        item = None

    if not title:
        title = current.deployment_settings.get_system_name()
    response.title = title
    return dict(content=item)

def tearDownClass(cls):
    # Remove the dynamic table
    s3db = current.s3db
    ttable = s3db.s3_table
    query = (ttable.name == cls.TABLENAME)
    current.db(query).delete()

def check_in(self, table, record, timestmp=None):
    """
        Bind the presence of the instance(s) to another instance

        @param table: table name of the other resource
        @param record: record in the other resource (as Row or record ID)
        @param timestmp: datetime of the check-in

        @returns: nothing
    """
    ptable = current.db[self.PRESENCE]

    if isinstance(table, str):
        table = current.db[table]
    fields = self.__get_fields(table)
    if not fields:
        raise SyntaxError("No location data in %s" % table._tablename)

    interlock = None
    if isinstance(record, Rows):
        record = record.first()
    if not isinstance(record, Row):
        record = table[record]
    if self.__super_entity(record):
        # Resolve the super-entity record into its instance record
        table = current.db[record.instance_type]
        fields = self.__get_fields(table, super_entity=False)
        if not fields:
            raise SyntaxError("No trackable type: %s" % table._tablename)
        query = table[self.UID] == record[self.UID]
        record = current.db(query).select(limitby=(0, 1)).first()
    if record and table._id.name in record:
        record = record[table._id.name]
    if record:
        interlock = "%s,%s" % (table, record)
    else:
        raise SyntaxError("No record specified for %s" % table._tablename)

    if interlock:
        if timestmp is None:
            timestmp = datetime.utcnow()
        data = dict(location_id=None, timestmp=timestmp, interlock=interlock)
        q = ((ptable.deleted == False) & (ptable.timestmp <= timestmp))
        for r in self.records:
            if self.TRACK_ID not in r:
                # Cannot check-in a non-trackable
                continue
            query = q & (ptable[self.TRACK_ID] == r[self.TRACK_ID])
            presence = current.db(query).select(orderby=~ptable.timestmp,
                                                limitby=(0, 1)).first()
            if presence and presence.interlock == interlock:
                # Already checked-in to the same instance
                continue
            data.update({self.TRACK_ID: r[self.TRACK_ID]})
            ptable.insert(**data)
            self.__update_timestamp(r[self.TRACK_ID], timestmp)

def set_base_location(self, location=None):
    """
        Set the base location of the instance(s)

        @param location: the location for the base location as Row or record ID

        @returns: nothing

        @note: instance tables without a location_id field will be ignored
    """
    if isinstance(location, S3Trackable):
        location = location.get_base_location()
    if isinstance(location, Rows):
        location = location.first()
    if isinstance(location, Row):
        location = location.id

    if not location or not str(location).isdigit():
        # Location not found
        return
    else:
        data = {self.LOCATION_ID: location}

    # Update records without track ID
    for r in self.records:
        if self.TRACK_ID in r:
            continue
        elif self.LOCATION_ID in r:
            if hasattr(r, "update_record"):
                r.update_record(**data)
            else:
                raise SyntaxError("Cannot relate record to a table.")

    # Update records with track ID
    # => this can happen table-wise = less queries
    track_ids = [r[self.TRACK_ID] for r in self.records if self.TRACK_ID in r]
    rows = current.db(self.table[self.TRACK_ID].belongs(track_ids)).select()
    tables = []
    for r in rows:
        instance_type = r.instance_type
        table = current.db[instance_type]
        if instance_type not in tables and self.LOCATION_ID in table.fields:
            tables.append(table)
        else:
            # No location ID in this type => ignore gracefully
            continue

    # Location specified => update all base locations
    for table in tables:
        current.db(table[self.TRACK_ID].belongs(track_ids)).update(**data)

    # Refresh records
    for r in self.records:
        if self.LOCATION_ID in r:
            r[self.LOCATION_ID] = location

def _a(*args, **kwargs):
    projectid = current.request.args(0)
    if projectid is None:
        raise HTTP(404, "Project not found")

    preview = False
    project = (
        current.db(
            current.searchableprojects
            & current.joinprojectstats
            & current.joinopenproject
            & current.joinmanager
            & (current.db.project.idproject == projectid)
        )
        .select(
            current.db.project.ALL,
            current.db.projectstat.ALL,
            current.db.openproject.ALL,
            current.db.user.ALL,
        )
        .first()
    )
    if project is None:
        if self.allow_preview:
            project = (
                current.db(
                    current.myprojects
                    & current.joinprojectstats
                    & current.joinmanager
                    & (current.db.project.idproject == projectid)
                )
                .select(current.db.project.ALL, current.db.projectstat.ALL, current.db.user.ALL)
                .first()
            )
            preview = True
        if project is None:
            raise HTTP(404, "This project either doesn't exist or hasn't been made public yet.")

    if self.requires_edit and not project.project.canedit():
        raise HTTP(400, "You cannot edit this project")
    if self.requires_delete and not project.project.candelete():
        raise HTTP(400, "You cannot delete this project")
    if self.requires_open and not project.project.canopen():
        raise HTTP(400, "You cannot open this project")
    if self.requires_open and len(project.project.pledges()) == 0:
        raise HTTP(400, "Project needs pledge levels before it can be opened")
    if self.requires_close and not project.project.canclose():
        raise HTTP(400, "You cannot close this project")

    current.request.vars["project"] = project
    current.request.vars["preview"] = preview
    return function(*args, **kwargs)

def send(self, resource, start=None, limit=None, msince=None,
         filters=None, mixed=False, pretty_print=False):
    """
        Respond to an incoming pull from the peer repository

        @param resource: the resource to be synchronized
        @param start: index of the first record to send
        @param limit: maximum number of records to send
        @param msince: minimum modification date/time for records to send
        @param filters: URL filters for record extraction
        @param mixed: negotiate resource with peer (disregard resource)
        @param pretty_print: make the output human-readable

        @return: a dict {status, remote, message, response}, with:
                 - status....the outcome of the operation
                 - remote....whether the error was remote (or local)
                 - message...the log message
                 - response..the response to send to the peer
    """
    if not resource or mixed:
        msg = "Mixed resource synchronization not supported"
        return {"status": self.log.FATAL,
                "message": msg,
                "response": current.xml.json_message(False, 400, msg),
                }

    # Export the data as S3XML
    output = resource.export_xml(start = start,
                                 limit = limit,
                                 filters = filters,
                                 msince = msince,
                                 pretty_print = pretty_print,
                                 )
    count = resource.results
    msg = "Data sent to peer (%s records)" % count

    # Update date/time of last incoming connection
    current.db(current.s3db.sync_repository.id == self.repository.id).update(
        last_connected = datetime.datetime.utcnow(),
    )

    # Set content type header
    headers = current.response.headers
    headers["Content-Type"] = "text/xml"

    return {"status": self.log.SUCCESS,
            "message": msg,
            "response": output,
            }

def force_integrity(self):
    if self.user_id > 0:
        if current.request.controller not in ['address', 'card', 'user']:
            addresses = current.db(current.db.address.userid == self.user_id).count()
            if addresses == 0:
                redirect(URL("address", "create", args=['register']))
            cards = current.db(current.db.card.userid == self.user_id).count()
            if cards == 0:
                redirect(URL("card", "register"))

def add_or_update_user_memberships(user_id, roles, update_session):
    rows = current.db((current.db.user_membership.user_id == user_id) &
                      (current.db.user_membership.group_id == current.db.user_group.id)
                      ).select(current.db.user_group.role).as_list()
    # as_list() yields dicts; extract the role names so the comparison
    # against the incoming list of role names is meaningful
    current_roles = [row['role'] for row in rows]
    logger.info("users current roles: %s", current_roles)
    if sorted(current_roles) != sorted(roles):
        current.db(current.db.user_membership.user_id == user_id).delete()
        for role in roles:
            add_membership_db(user_id, role, update_session)

def create_public_ip_mapping_in_nat(vm_id):
    vm_data = current.db.vm_data[vm_id]
    try:
        create_mapping(vm_data.public_ip, vm_data.private_ip)
        logger.debug("Updating DB")
        current.db(current.db.public_ip_pool.public_ip == vm_data.public_ip).update(vm_id=vm_id)
    except:
        log_exception()

def create_or_update_user(user_info, update_session):
    user_name = user_info['user_name']
    user = current.db(current.db.user.username == user_name).select().first()
    if not user:
        # Create user
        user = current.db.user.insert(username=user_name, registration_id=user_name)
    current.db(current.db.user.username == user_name).update(first_name=user_info['first_name'],
                                                             last_name=user_info['last_name'],
                                                             email=user_info['email'])

def add_membership_db(_user_id, role, update_session):
    # Find the group_id for the given role
    _group_id = current.db(current.db.user_group.role == role).select(current.db.user_group.id).first()['id']
    _org_id = current.db(current.db.user.id == _user_id).select(current.db.user.organisation_id).first()['organisation_id']
    if _group_id != 0:
        current.db.user_membership.insert(user_id=_user_id, group_id=_group_id)
        if update_session:
            # Add role to the current session
            current.auth.user_groups[long(_group_id)] = role
            current.auth.organisation_id = _org_id
            current.logger.debug(_org_id)
            current.logger.debug(role)

def remove_vnc_mapping_from_nat(vm_id):
    vm_data = current.db.vm_data[vm_id]
    vnc_host_ip = config.get("GENERAL_CONF", "vnc_ip")
    host_ip = vm_data.host_id.host_ip.private_ip
    vnc_port = vm_data.vnc_port
    try:
        remove_mapping(vnc_host_ip, host_ip, vnc_port, vnc_port)
        logger.debug("Updating DB")
        current.db(current.db.vnc_access.vm_id == vm_id).update(status=VNC_ACCESS_STATUS_INACTIVE)
    except:
        log_exception()

def __update_timestamp(self, track_id, timestamp):
    """
        Update the timestamp of a trackable

        @param track_id: the trackable ID (super-entity key)
        @param timestamp: the timestamp
    """
    if track_id:
        if timestamp is None:
            timestamp = datetime.utcnow()
        current.db(self.table.track_id == track_id).update(track_timestmp=timestamp)

def create_master_file(self):
    """ Function to create a master file containing all the strings """
    try:
        import cPickle as pickle
    except:
        import pickle

    A = TranslateAPI()
    modlist = A.get_modules()
    modlist.append("core")

    # List containing all the strings in the eden code
    all_strings = []
    asappend = all_strings.append
    # Dictionary keyed on modules containing the indices of strings
    # in all_strings which belong to the corresponding module
    string_dict = {}
    ind = 0
    for mod in modlist:
        string_list = []
        sappend = string_list.append
        strings = A.get_strings_by_module(mod)
        for (l, s) in strings:
            # Remove quotes around the strings
            if (s[0] == '"' and s[-1] == '"') or (s[0] == "'" and s[-1] == "'"):
                s = s[1:-1]
            if s not in all_strings:
                asappend(s)
                sappend(ind)
                ind += 1
            else:
                tmpind = all_strings.index(s)
                sappend(tmpind)
        string_dict[mod] = string_list

    # Save all_strings and string_dict as pickle objects in a file
    data_file = os.path.join(current.request.folder, "uploads", "temp.pkl")
    f = open(data_file, "wb")
    pickle.dump(all_strings, f)
    pickle.dump(string_dict, f)
    f.close()

    # Set the update flag for all languages to indicate that the
    # previously stored percentages of translation may have changed
    # as the master file has been changed.
    utable = current.s3db.translate_update
    current.db(utable.id > 0).update(sbit=True)

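# A minimal sketch showing how the two objects written above can be read
# back from the same pickle file, in the order they were dumped; the file
# path mirrors the one used in create_master_file(), and read_master_file
# is a hypothetical helper added here for illustration only.
import os
import pickle

def read_master_file(folder):
    data_file = os.path.join(folder, "uploads", "temp.pkl")
    with open(data_file, "rb") as f:
        # pickle.load() returns one object per dump() call, in order
        all_strings = pickle.load(f)
        string_dict = pickle.load(f)
    return all_strings, string_dict
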
def lookup_rows(self, key, values, fields=None):
    """
        Custom lookup method for need rows, does a left join with the
        tag. Parameters key and fields are not used, but are kept for
        API compatibility reasons.

        @param values: the need IDs
    """
    s3db = current.s3db

    ntable = s3db.req_need
    nttable = s3db.req_need_tag
    left = nttable.on((nttable.need_id == ntable.id) & \
                      (nttable.tag == "req_number"))

    qty = len(values)
    if qty == 1:
        query = (ntable.id == values[0])
        limitby = (0, 1)
    else:
        query = (ntable.id.belongs(values))
        limitby = (0, qty)

    rows = current.db(query).select(ntable.id,
                                    ntable.name,
                                    nttable.value,
                                    left=left,
                                    limitby=limitby)
    self.queries += 1
    return rows

def login_callback(form):
    if current.auth.is_logged_in():
        member = current.db(current.db.user_membership.user_id == current.auth.user.id).select().first()
        if not member:
            roles = fetch_user_role(current.auth.user.username)
            for role in roles:
                add_membership_db(current.auth.user.id, role, True)

def custom_prep(r):
    # Call standard prep
    if callable(standard_prep):
        result = standard_prep(r)
    else:
        result = True

    table = r.table

    # Simple Location Represent
    #table.location_id.represent = S3Represent(lookup="gis_location")

    # We only use a single type so hard-code it
    table.series_id.readable = table.series_id.writable = False
    if not r.record:
        stable = current.s3db.cms_series
        row = current.db(stable.name == "News").select(stable.id,
                                                       limitby=(0, 1)
                                                       ).first()
        try:
            table.series_id.default = row.id
        except:
            # Prepop not done
            # Undo the readable/writable so as not to mask the error
            table.series_id.readable = table.series_id.writable = True

    return result

def update_record(self, table, record_id, row, data):
    form = Storage(vars=Storage([(f, row[f]) for f in table.fields if f in row]))
    form.vars.update(data)
    try:
        current.db(table._id == row[table._id]).update(**data)
    except Exception:
        self.raise_error("Could not update %s.%s" % (table._tablename, record_id))
    else:
        s3db = current.s3db
        s3db.update_super(table, form.vars)
        current.auth.s3_set_record_owner(table, row[table._id], force_update=True)
        s3db.onaccept(table, form, method="update")
    return form.vars

def vol_rheader(r):
    if r.representation != "html":
        # RHeaders only used in interactive views
        return None
    record = r.record
    if record is None:
        # List or Create form: rheader makes no sense here
        return None

    #from gluon.html import DIV
    person_id = r.id
    s3db = current.s3db
    table = s3db.hrm_human_resource
    hr = current.db(table.person_id == person_id).select(table.organisation_id,
                                                         limitby=(0, 1)).first()
    if hr:
        if current.auth.user.organisation_id != hr.organisation_id:
            # Only show Org if not the same as user's
            rheader = table.organisation_id.represent(hr.organisation_id)
        else:
            rheader = None
    else:
        # Something went wrong!
        rheader = None
    return rheader

def _respond_drequest(message, report_id, response, text):
    """
        Parse Replies To Deployment Request
    """
    # Can we identify the Human Resource?
    hr_id = S3Parsing().lookup_human_resource(message.from_address)
    if hr_id:
        rtable = current.s3db.irs_ireport_human_resource
        query = (rtable.ireport_id == report_id) & \
                (rtable.human_resource_id == hr_id)
        current.db(query).update(reply=text, response=response)
        reply = "Response Logged in the Report (Id: %d )" % report_id
    else:
        reply = None
    return reply

def is_session_alive(from_address):
    """
        Check whether there is an alive session from the same sender
    """
    email = None
    now = current.request.utcnow
    stable = current.s3db.msg_session
    query = (stable.is_expired == False) & \
            (stable.from_address == from_address)
    records = current.db(query).select(stable.id,
                                       stable.created_datetime,
                                       stable.expiration_time,
                                       stable.email,
                                       )
    for record in records:
        # Seconds elapsed since the session was created
        elapsed = (now - record.created_datetime).total_seconds()
        if elapsed < record.expiration_time:
            email = record.email
            break
        else:
            record.update_record(is_expired=True)
    return email

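# Standalone sketch of the expiry arithmetic used above, with plain
# datetime values instead of DB rows; expiration_time is assumed to be a
# session lifetime in seconds, as the comparison in is_session_alive()
# implies. session_expired is a hypothetical helper for illustration.
from datetime import datetime, timedelta

def session_expired(created_datetime, expiration_time, now=None):
    """Return True when more than expiration_time seconds have elapsed."""
    if now is None:
        now = datetime.utcnow()
    return (now - created_datetime).total_seconds() >= expiration_time

# Example: a session created 10 minutes ago with a 5-minute lifetime
created = datetime.utcnow() - timedelta(minutes=10)
assert session_expired(created, expiration_time=300)
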
def lookup_rows(self, key, values, fields=None):
    """
        Custom lookup method for activity rows, does a left join with
        the tag. Parameters key and fields are not used, but are kept
        for API compatibility reasons.

        @param values: the activity IDs
    """
    s3db = current.s3db

    atable = s3db.project_activity
    aotable = s3db.project_activity_organisation
    left = aotable.on((aotable.activity_id == atable.id) & \
                      (aotable.role == 1))

    qty = len(values)
    if qty == 1:
        query = (atable.id == values[0])
        limitby = (0, 1)
    else:
        query = (atable.id.belongs(values))
        limitby = (0, qty)

    rows = current.db(query).select(atable.id,
                                    atable.name,
                                    aotable.organisation_id,
                                    left=left,
                                    limitby=limitby)
    self.queries += 1
    return rows

def site_check_out(site_id, person_id):
    """
        When a person is checked-out from a Shelter then
        update the Shelter Registration
    """
    s3db = current.s3db
    T = current.T

    # Find the Registration
    stable = s3db.cr_shelter
    rtable = s3db.cr_shelter_registration
    query = (stable.site_id == site_id) & \
            (stable.id == rtable.shelter_id) & \
            (rtable.person_id == person_id)
    registration = current.db(query).select(rtable.id,
                                            rtable.registration_status,
                                            limitby=(0, 1),
                                            ).first()
    if not registration:
        error = T("Registration not found")
        warning = None
        return error, warning

    if registration.registration_status == 3:
        error = None
        warning = T("Client was already checked-out")
        return error, warning

    # Update the Shelter Registration
    registration.update_record(registration_status=3)
    onaccept = s3db.get_config("cr_shelter_registration", "onaccept")
    if onaccept:
        onaccept(registration)

    return None, None

def user_authenticate(self, password):
    credential = current.db(current.db.credential.userid == self.user_id).select(
        current.db.credential.passwordsalt,
        current.db.credential.passwordhash).first()
    return self.generate_hash(credential.passwordsalt, password) == credential.passwordhash

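# A minimal sketch of what a generate_hash() helper like the one used
# above could look like; the original implementation is not shown here,
# so the salted-SHA-256 scheme below is an assumption, not the actual one.
import hashlib

def generate_hash(salt, password):
    # Hash the salt and password together; both are treated as text
    return hashlib.sha256((salt + password).encode("utf-8")).hexdigest()

# Example round-trip: store a hash, then verify a login attempt against it
stored_hash = generate_hash("random-salt", "secret")
assert generate_hash("random-salt", "secret") == stored_hash
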
def custom_prep(r):
    # Call standard prep
    if callable(standard_prep):
        if not standard_prep(r):
            return False

    if r.controller == "deploy":
        # Popups in RDRT Member Profile
        table = r.table
        job_title_id = table.job_title_id
        job_title_id.label = T("Sector / Area of Expertise")
        job_title_id.comment = None
        jtable = current.s3db.hrm_job_title
        query = (jtable.type == 4)
        if r.method == "update" and r.record.job_title_id:
            # Allow to keep the current value
            query |= (jtable.id == r.record.job_title_id)
        from s3 import IS_ONE_OF
        job_title_id.requires = IS_ONE_OF(current.db(query),
                                          "hrm_job_title.id",
                                          job_title_id.represent,
                                          )
        job_title = table.job_title
        job_title.readable = job_title.writable = True
    return True

def _a(*args, **kwargs):
    rewardid = current.request.args(0)
    if rewardid is None:
        raise HTTP(404, "No reward specified")

    reward = (
        current.db(
            (current.db.reward.idreward == rewardid)
            & current.myprojects
            & (current.db.project.idproject == current.db.reward.projectid)
        )
        .select(current.db.project.ALL, current.db.reward.ALL)
        .first()
    )
    if reward is None:
        raise HTTP(404, "Reward does not exist")
    if not reward.project.canedit():
        raise HTTP(403, "Cannot edit this project")

    current.request.vars["reward"] = reward
    return function(*args, **kwargs)

def default_parameter_filter(selector, tablename=None):
    ptable = s3db.stats_parameter
    query = (ptable.deleted == False) & \
            (ptable.name == "Cases")
    row = current.db(query).select(ptable.parameter_id,
                                   limitby=(0, 1)).first()
    if row:
        return row.parameter_id
    else:
        return None

def create_barecode(product_id):
    """Return the generated barecode for a product."""
    mylogger.debug(message='create_barecode')
    product_cas_number = current.db(current.db.product.id == product_id).select(
        current.db.product.cas_number).first().cas_number
    mylogger.debug(message='product_id:%s' % product_id)
    mylogger.debug(message='product_cas_number:%s' % product_cas_number)

    last_storage_id = current.db(current.db.storage).count()
    mylogger.debug(message='last_storage_id:%s' % last_storage_id)

    today = datetime.date.today()
    today = today.strftime('%Y%m%d')

    barecode = '%s_%s_%s.1' % (product_cas_number, today, last_storage_id)
    mylogger.debug(message='barecode:%s' % barecode)
    return barecode

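# A quick standalone illustration of the barecode format produced above:
# "<CAS number>_<YYYYMMDD>_<storage count>.1". The input values here are
# made up for the example, not taken from the database.
import datetime

cas_number = "7732-18-5"          # example CAS number (water)
storage_count = 42                # stands in for the storage row count
today = datetime.date.today().strftime('%Y%m%d')
barecode = '%s_%s_%s.1' % (cas_number, today, storage_count)
print(barecode)                   # e.g. 7732-18-5_20240101_42.1
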
def delete(self, entity):
    """Delete an entity.

    entity -- an ENTITY instance
    return: the number of deleted rows
    """
    _id = current.db(current.db.entity.id == entity.id).delete()
    current.db.commit()
    return _id

def compute_nb_user(self, entity_id):
    """Get the number of users for the given entity.

    entity_id -- the id of the entity
    """
    admin_id = [a.id for a in cc.get_admins()]
    return current.db(
        (current.auth.settings.table_membership.group_id == entity_id) &
        (~current.auth.settings.table_membership.user_id.belongs(tuple(admin_id)))).count()

def compute_nb_store_location(self, entity_id):
    """Returns the number of store_locations of the given entity.

    entity_id -- the id of the entity
    """
    _count = current.db(current.db.store_location.entity == entity_id).count()
    my_logger.debug(message='_count:%s' % _count)
    return _count

def retrieve_borrower(self, storage_id):
    borrower = current.db((current.db.borrow.storage == storage_id) &
                          (current.db.borrow.borrower == current.db.person.id)).select(
                              current.db.person.id).first()
    if borrower is not None:
        return PERSON_MAPPER().find(person_id=borrower.id)[0]
    else:
        return None

def login_ldap_callback(form):
    if current.auth.is_logged_in():
        user_name = current.auth.user.username
        if current.db(current.db.user.username == user_name).select(
                current.db.user.last_name)[0]['last_name'] == "":
            user_info = fetch_ldap_user(user_name)
            if user_info:
                create_or_update_user(user_info, True)
            else:
                current.logger.error('Unable To Update User Info!!!')

def config(self):
    """ Lazy access to the current sync config """
    if self._config is None:
        table = current.s3db.sync_config
        row = current.db().select(table.ALL, limitby=(0, 1)).first()
        self._config = row
    return self._config

def get_child_message(message_id, depth):
    child_messages = current.db(current.db.message.parent == message_id).select(
        orderby=current.db.message.id)
    return [[depth + 1,
             child_message.id,
             get_child_message(child_message.id, depth + 1)]
            for child_message in child_messages]

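# Standalone sketch of the same recursive walk over an in-memory
# parent->children mapping instead of the message table; it produces the
# same nested [depth, id, children] structure as get_child_message().
# The mapping and helper name are made up for the example.
children_of = {1: [2, 3], 2: [4], 3: [], 4: []}

def get_child(message_id, depth):
    return [[depth + 1, child, get_child(child, depth + 1)]
            for child in children_of.get(message_id, [])]

print(get_child(1, 0))
# [[1, 2, [[2, 4, []]]], [1, 3, []]]
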
def host_status_sanity_check():
    for host in current.db().select(current.db.host.ALL):
        if host.status != HOST_STATUS_MAINTENANCE:
            host_status = check_host_status(host.host_ip.private_ip)
            if host_status != host.status:
                logger.debug("Changing status of " + host.host_name + " to " + str(host_status))
                host.update_record(status=host_status)
                current.db.commit()
                if host_status == HOST_STATUS_DOWN:
                    respawn_dangling_vms(host.id)

def testImportMerge(self):
    s3db = current.s3db

    UUID = "org_organisation.uuid"
    DELETED = "org_organisation.deleted"
    REPLACEDBY = "org_organisation.deleted_rb"

    uids = ["TESTIMPORTMERGEORG1", "TESTIMPORTMERGEORG2"]
    test_fields = ["id", "uuid", "deleted", "deleted_rb", "office.name"]

    # Check the existing records
    resource = s3db.resource("org_organisation",
                             uid=uids,
                             include_deleted=True)
    result = resource.select(test_fields, limit=None)["rows"]
    self.assertEqual(len(result), 2)
    for record in result:
        self.assertTrue(record[UUID] in uids)
        self.assertFalse(record[DELETED])
        self.assertEqual(record[REPLACEDBY], None)
        if record[UUID] == "TESTIMPORTMERGEORG2":
            self.assertEqual(record["org_office.name"], "TestImportMergeOffice2")
        else:
            self.assertEqual(record["org_office.name"], None)

    # Send the merge
    xmlstr = """
<s3xml>
    <resource name="org_organisation" uuid="TESTIMPORTMERGEORG1">
        <data field="name">TestImportMergeOrg1</data>
    </resource>
    <resource name="org_organisation" uuid="TESTIMPORTMERGEORG2"
              deleted="True" replaced_by="TESTIMPORTMERGEORG1"/>
</s3xml>"""
    xmltree = etree.ElementTree(etree.fromstring(xmlstr))
    resource = current.s3db.resource("org_organisation")
    msg = resource.import_xml(xmltree)
    #print msg
    self.assertEqual(resource.error, None)

    # Check the result
    resource = s3db.resource("org_organisation",
                             uid=uids,
                             include_deleted=True)
    result = resource.select(test_fields, limit=None)["rows"]
    self.assertEqual(len(result), 2)
    for record in result:
        if record[UUID] == "TESTIMPORTMERGEORG1":
            self.assertFalse(record[DELETED])
            self.assertEqual(record[REPLACEDBY], None)
        elif record[UUID] == "TESTIMPORTMERGEORG2":
            self.assertTrue(record[DELETED])
            replaced_by = record[REPLACEDBY]
            row = current.db(resource.table._id == replaced_by) \
                            .select(resource.table.uuid,
                                    limitby=(0, 1)) \
                            .first()
            self.assertEqual(row.uuid, "TESTIMPORTMERGEORG1")

def latest_4_posts(series_filter, layout):
    resource.add_filter(S3FieldSelector("series_id$name") == series_filter)
    list_fields = ["series_id",
                   "location_id",
                   "date",
                   "body",
                   "created_by",
                   "created_by$organisation_id",
                   "document.file",
                   "event_post.event_id",
                   ]
    orderby = resource.table.date
    resource.add_filter(resource.table.date >= current.request.now)
    datalist, numrows, ids = resource.datalist(fields=list_fields,
                                               start=None,
                                               limit=4,
                                               listid="news_datalist",
                                               orderby=orderby,
                                               layout=layout)
    if numrows == 0:
        # Empty table or just no match?
        table = resource.table
        if "deleted" in table:
            available_records = current.db(table.deleted != True)
        else:
            available_records = current.db(table._id > 0)
        if available_records.select(table._id, limitby=(0, 1)).first():
            msg = DIV(S3CRUD.crud_string(resource.tablename, "msg_no_match"),
                      _class="empty")
        else:
            msg = DIV(S3CRUD.crud_string(resource.tablename, "msg_list_empty"),
                      _class="empty")
        data = msg
    else:
        # Render the list
        dl = datalist.html()
        data = dl
    return data

def organisation_needs(row, need_type=None):
    """
        Field.Method to render structured organisation needs (list views)

        @param row: the row (passed from Field.Method)
        @param need_type: the need type (skills|items)
    """
    NONE = current.messages["NONE"]

    try:
        needs = getattr(row, "req_organisation_needs")
    except AttributeError:
        return NONE
    needs_id = needs.id

    s3db = current.s3db
    if need_type == "skills":
        ltable = s3db.req_organisation_needs_skill
        stable = s3db.hrm_skill
        left = stable.on(stable.id == ltable.skill_id)
    elif need_type == "items":
        ltable = s3db.req_organisation_needs_item
        stable = s3db.supply_item
        left = stable.on(stable.id == ltable.item_id)

    query = (ltable.organisation_needs_id == needs_id)
    rows = current.db(query).select(ltable.demand,
                                    stable.name,
                                    left=left,
                                    )
    if not rows:
        return NONE

    needs = {}
    dfield = str(ltable.demand)
    nfield = str(stable.name)
    for row in rows:
        demand = row[dfield]
        if demand not in needs:
            needs[demand] = [row[nfield]]
        else:
            needs[demand].append(row[nfield])

    T = current.T
    output = DIV(_class="org-needs")
    for demand in (4, 3, 2, 1):
        if demand not in needs:
            continue
        title = "%s:" % T(demand_options[demand])
        items = UL([LI(T(need))
                    for need in needs[demand] if need is not None])
        output.append(TAG[""](title, items))

    return output

def get_nb_children(self, store_location_id):
    """Return the number of children of the given store_location.

    store_location_id -- the store location id
    """
    my_logger.debug(message='store_location_id=%s' % store_location_id)
    _count = current.db(current.db.store_location.parent == store_location_id).count()
    my_logger.debug(message='_count=%s' % _count)
    return _count

def load_exercise_metrics(self, exercise):
    self.course = current.db(current.db.courses.id == self.course_id).select().first()
    self.users = current.db(current.db.auth_user.course_id == current.auth.user.course_id).select(
        current.db.auth_user.username,
        current.db.auth_user.first_name,
        current.db.auth_user.last_name)
    self.logs = current.db(
        (current.db.useinfo.course_id == self.course.course_name) &
        (current.db.useinfo.timestamp >= self.course.term_start_date)
    ).select(current.db.useinfo.timestamp,
             current.db.useinfo.sid,
             current.db.useinfo.event,
             current.db.useinfo.act,
             current.db.useinfo.div_id,
             orderby=current.db.useinfo.timestamp)
    self.problem_metrics = CourseProblemMetrics(self.course_id, self.users, self.db_chapter)
    self.problem_metrics.update_metrics(self.course.course_name)

def bootup_host(host_id_list):
    logger.info('Starting Host Bootup')
    # pool = ThreadPool(THREAD_POOL_COUNT)
    vms = current.db(
        ~current.db.vm_data.status.belongs(current.VM_STATUS_UNKNOWN,
                                           current.VM_STATUS_IN_QUEUE) &
        current.db.vm_data.host_id.belongs(host_id_list)).select()
    for vm_detail in vms:
        revert_and_resume(vm_detail.id, vm_detail.vm_identity)

def is_in_entity_of(self, product_id, user_id):
    """Return True if the product is stored in one of the entities
    of the given user.

    product_id -- the product id
    user_id -- the user id
    """
    if current.auth.has_membership(user_id=user_id, role='all_entity'):
        _product_entities_count = current.db(
            (current.db.product.id == product_id) &
            (current.db.storage.product == current.db.product.id) &
            (current.db.storage.archive == False)).count()
    else:
        _product_entities_count = current.db(
            (current.db.product.id == product_id) &
            (current.db.storage.product == current.db.product.id) &
            (current.db.storage.archive == False) &
            (current.db.storage.store_location == current.db.store_location.id) &
            (current.db.store_location.entity == current.db.entity.id) &
            (current.db.membership.user_id == user_id) &
            (current.db.entity.id == current.db.membership.group_id)).count()
    return _product_entities_count > 0

def update_metrics(self, course_name):
    rslogger.debug("Updating CourseProblemMetrics for {} of {}".format(
        self.chapter, course_name))
    rslogger.debug("doing chapter {}".format(self.chapter))
    # todo: Join this with questions so that we can limit the questions
    # to the selected chapter
    mcans = current.db(
        (current.db.mchoice_answers.course_name == course_name) &
        (current.db.mchoice_answers.div_id == current.db.questions.name) &
        (current.db.questions.chapter == self.chapter.chapter_label)
    ).select(orderby=current.db.mchoice_answers.timestamp)
    rslogger.debug("Found {} exercises".format(len(mcans)))
    fbans = current.db(
        (current.db.fitb_answers.course_name == course_name) &
        (current.db.fitb_answers.div_id == current.db.questions.name) &
        (current.db.questions.chapter == self.chapter.chapter_label)
    ).select(orderby=current.db.fitb_answers.timestamp)
    psans = current.db(
        (current.db.parsons_answers.course_name == course_name) &
        (current.db.parsons_answers.div_id == current.db.questions.name) &
        (current.db.questions.chapter == self.chapter.chapter_label)
    ).select(orderby=current.db.parsons_answers.timestamp)

    # Convert the numeric answers to letter answers to make matching
    # the questions easier.
    to_letter = dict(zip("0123456789", "ABCDEFGHIJ"))
    for row in mcans:
        mc = row['mchoice_answers']
        mc.answer = to_letter.get(mc.answer, mc.answer)

    def add_problems(result_set, tbl):
        for srow in result_set:
            row = srow[tbl]
            rslogger.debug("UPDATE_METRICS {}".format(row))
            if row.div_id not in self.problems:
                self.problems[row.div_id] = ProblemMetrics(
                    self.course_id, row.div_id, self.users)
            self.problems[row.div_id].add_data_point(row)

    add_problems(mcans, 'mchoice_answers')
    add_problems(fbans, 'fitb_answers')
    add_problems(psans, 'parsons_answers')

def has_storage_archived(self, product_id, user_id):
    """Return True if the product has archived storage for the given
    product in one of the entities of the given user.

    product_id -- the product id
    user_id -- the user id
    """
    if current.auth.has_permission(user_id=user_id, name='admin'):
        _has_storage_archives = current.db(
            (current.db.storage.product == product_id) &
            (current.db.storage.archive == True) &
            (current.db.storage.store_location == current.db.store_location.id) &
            (current.db.store_location.entity == current.db.entity.id)).count() != 0
    else:
        _has_storage_archives = current.db(
            (current.db.storage.product == product_id) &
            (current.db.storage.archive == True) &
            (current.db.storage.store_location == current.db.store_location.id) &
            (current.db.store_location.entity == current.db.entity.id) &
            (current.db.membership.user_id == user_id) &
            (current.db.entity.id == current.db.membership.group_id)).count() != 0
    return _has_storage_archives

def test_mem004_07_member_search_advance_by_City_Town_Village(self):
    #return
    """
        @case: mem004-07
        @description: Search Members - Advanced Search by City / Town / Village
    """
    self.start()
    self.advancedSearchTest(["Lour", "Tequino Mata"])
    member = current.s3db["member_membership"]
    loc = current.s3db["gis_location"]
    dbRowCount = current.db((member.deleted != 'T') &
                            (member.location_id == loc.id) &
                            ((loc.L3 == 'Lour') | (loc.L3 == 'Tequino Mata'))).count()
    self.compareRowCount(dbRowCount)

def test_mem004_06_member_search_advance_by_County_District(self):
    #return
    """
        @case: mem004-06
        @description: Search Members - Advanced Search by County / District
    """
    self.start()
    self.advancedSearchTest(["Laga"])
    member = current.s3db["member_membership"]
    loc = current.s3db["gis_location"]
    dbRowCount = current.db((member.deleted != 'T') &
                            (member.location_id == loc.id) &
                            (loc.L2 == 'Laga')).count()
    self.compareRowCount(dbRowCount)

def test_mem004_05_member_search_advance_by_State_Province(self):
    #return
    """
        @case: mem004-05
        @description: Search Members - Advanced Search by State / Province
    """
    self.start()
    self.advancedSearchTest(["Baucau", "Ermera"])
    member = current.s3db["member_membership"]
    loc = current.s3db["gis_location"]
    dbRowCount = current.db((member.deleted != 'T') &
                            (member.location_id == loc.id) &
                            ((loc.L1 == 'Baucau') | (loc.L1 == 'Ermera'))).count()
    self.compareRowCount(dbRowCount)

def test_mem004_04_member_search_advance_by_Country(self):
    #return
    """
        @case: mem004-04
        @description: Search Members - Advanced Search by Country
    """
    self.start()
    self.advancedSearchTest(["Timor-Leste"])
    member = current.s3db["member_membership"]
    loc = current.s3db["gis_location"]
    dbRowCount = current.db((member.deleted != 'T') &
                            (member.location_id == loc.id) &
                            (loc.L0 == 'Timor-Leste')).count()
    self.compareRowCount(dbRowCount)

def _update_dataset(self, dataset):
    """
        Update the data set from the repo, if possible

        Args:
            dataset: the sync_dataset Row
    """
    s3 = current.response.s3
    repository = self.repository
    code = dataset.code
    error_msg = "S3Sync: cannot update %s dataset from peer" % code

    # Update the data set from remote
    url = "%s/sync/dataset.xml?~.code=%s&mcomponents=None" % \
          (repository.url, code)
    opener = self._http_opener(url)
    try:
        dataset_info = opener.open(url)
    except:
        current.log.error(error_msg)
        return dataset

    if dataset_info:
        s3db = current.s3db
        resource = s3db.resource("sync_dataset", id=dataset.id)

        # Enable UUID synchronization
        synchronise_uuids = s3.synchronise_uuids
        s3.synchronise_uuids = True
        try:
            resource.import_xml(dataset_info)
        except IOError:
            current.log.error(error_msg)
            return dataset
        s3.synchronise_uuids = synchronise_uuids

        # Reload to get the updated information
        table = s3db.sync_dataset
        query = (table.id == dataset.id)
        dataset = current.db(query).select(table.id,
                                           table.code,
                                           table.use_archive,
                                           table.archive_url,
                                           limitby=(0, 1),
                                           ).first()
    return dataset

def has_parent(self, store_location_id):
    """Return True if the given store location has a parent.

    store_location_id -- the store location id
    """
    my_logger.debug(message='store_location_id=%s' % store_location_id)
    _has_parent = current.db(
        current.db.store_location.id == store_location_id).select().first().parent is not None
    my_logger.debug(message='_has_parent=%s' % _has_parent)
    return _has_parent

def get_user(self):
    """ Returns the user info """
    token = self.accessToken()
    if not token:
        return None

    session = current.session
    user = None
    user_dict = None
    try:
        user = self.call_api(token)
    except Exception:
        session.token = None

    if user:
        #if "email" not in user:
        #    # Non-standard key for "email" claim
        #    email = user.get("mail")
        #else:
        email = user.get("email")
        if not email:
            msg = "OpenID Connect: unidentifiable user %s" % user.get("sub")
            current.session.warning = msg
            current.log.warning(msg)
            redirect(URL(c="default", f="user", args=["login"]))

        # Check if a user with this email has already registered
        table = current.auth.settings.table_user
        query = (table.email == email)
        existing = current.db(query).select(table.id,
                                            table.password,
                                            limitby=(0, 1)).first()
        if existing:
            user_dict = {"email": email,
                         "password": existing.password,
                         }
        else:
            first_name = user.get("given_name", "")
            last_name = user.get("family_name", "")
            if not first_name and not last_name and "name" in user:
                # Try to parse the combined 'name' field
                from nameparser import HumanName
                name = HumanName(user.get("name", ""))
                first_name = name.first
                last_name = name.last
            user_dict = {"first_name": first_name,
                         "last_name": last_name,
                         "email": email,
                         }
    return user_dict

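# Quick demo of the nameparser fallback used above (nameparser is a
# third-party package: pip install nameparser); HumanName splits a
# combined display name into its parts. The sample name is made up.
from nameparser import HumanName

name = HumanName("Dr. Juan Q. Xavier de la Vega III")
print(name.first)   # Juan
print(name.last)    # de la Vega
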
def load_user_metrics(self, username):
    self.username = username
    self.course = current.db(current.db.courses.id == self.course_id).select().first()
    if not self.course:
        rslogger.debug("ERROR - NO COURSE course_id = {}".format(self.course_id))
    self.chapters = current.db(
        current.db.chapters.course_id == current.auth.user.course_name).select()
    self.user = current.db(
        (current.db.auth_user.username == username) &
        (current.db.auth_user.course_id == self.course_id)).select(
            current.db.auth_user.id,
            current.db.auth_user.first_name,
            current.db.auth_user.last_name,
            current.db.auth_user.email,
            current.db.auth_user.username).first()
    if not self.user:
        rslogger.debug("ERROR - NO USER username={} course_id={}".format(
            username, self.course_id))
        current.session.flash = 'Please make sure you are in the correct course'
        # TODO: calling redirect here is kind of a hacky way to handle this.
        redirect(URL('default', 'courses'))
    self.logs = current.db(
        (current.db.useinfo.course_id == self.course.course_name) &
        (current.db.useinfo.sid == username) &
        (current.db.useinfo.timestamp >= self.course.term_start_date)
    ).select(current.db.useinfo.timestamp,
             current.db.useinfo.sid,
             current.db.useinfo.event,
             current.db.useinfo.act,
             current.db.useinfo.div_id,
             orderby=~current.db.useinfo.timestamp)
    self.db_chapter_progress = current.db(
        current.db.user_sub_chapter_progress.user_id == self.user.id
    ).select(current.db.user_sub_chapter_progress.chapter_id,
             current.db.user_sub_chapter_progress.sub_chapter_id,
             current.db.user_sub_chapter_progress.status)
    self.formatted_activity = UserLogCategorizer(self.logs)
    self.chapter_progress = UserActivityChapterProgress(
        self.chapters, self.db_chapter_progress)

def _search_for_next(rec):
    """
        Internal function to find the next_in_stream from the db. Only
        needed when the sort order from the records set doesn't provide
        a matching next_in_stream.

        Returns:
            The db.audio.id of the next_in_stream
    """
    # Get the similarity time window as a where condition,
    # allowing for wrapping at midnight
    sim_min = (rec.record_datetime - window).time()
    sim_max = (rec.record_datetime + window).time()
    if sim_min < sim_max:
        sim_where = ((current.db.audio.start_time > sim_min) &
                     (current.db.audio.start_time < sim_max))
    else:
        sim_where = ((current.db.audio.start_time > sim_min) |
                     (current.db.audio.start_time < sim_max))

    # Search the db for the next later recording within the similarity window
    later = current.db(
        (current.db.audio.site_id == site_id) &
        (current.db.audio.record_datetime > rec.record_datetime) &
        sim_where).select(orderby=current.db.audio.record_datetime)
    if later:
        return later[0].id
    else:
        # If no later recordings in the slot, look for an earlier one
        earlier = current.db(
            (current.db.audio.site_id == site_id) &
            (current.db.audio.record_datetime < rec.record_datetime) &
            sim_where).select(orderby=~current.db.audio.record_datetime)
        if earlier:
            return earlier[0].id
        else:
            return None

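# Standalone sketch of the midnight-wrapping window test used above:
# when the window straddles midnight, sim_min > sim_max and the
# membership test flips from AND to OR. Pure-stdlib illustration;
# in_window is a hypothetical helper, not part of the code above.
from datetime import datetime, time, timedelta

def in_window(start_time, centre, window):
    sim_min = (centre - window).time()
    sim_max = (centre + window).time()
    if sim_min < sim_max:
        # Normal window, e.g. 10:00-12:00
        return sim_min < start_time < sim_max
    # Window wraps midnight, e.g. 23:30-00:30
    return start_time > sim_min or start_time < sim_max

centre = datetime(2024, 1, 2, 0, 0)               # midnight
window = timedelta(minutes=30)
assert in_window(time(23, 45), centre, window)    # wraps: 23:45 is inside
assert not in_window(time(1, 0), centre, window)  # 01:00 is outside
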
def vol_programme_active(person_id):
    """
        Whether a Volunteer counts as 'Active' based on the number of
        hours they've done (both Trainings & Programmes) per month,
        averaged over the last year.
        If nothing recorded for the last 3 months, don't penalise as
        assume that data entry hasn't yet been done.

        @ToDo: This should be based on the HRM record, not Person record
               - could be active with Org1 but not with Org2
        @ToDo: allow to be calculated differently per-Org
    """
    now = current.request.utcnow

    # Time spent on Programme work
    htable = current.s3db.hrm_programme_hours
    query = (htable.deleted == False) & \
            (htable.person_id == person_id) & \
            (htable.date != None)
    programmes = current.db(query).select(htable.hours,
                                          htable.date,
                                          orderby=htable.date)
    if programmes:
        # Ignore up to 3 months of records
        import datetime
        three_months_prior = (now - datetime.timedelta(days=92))
        end = max(programmes.last().date, three_months_prior.date())
        last_year = end - datetime.timedelta(days=365)
        # Is this the Volunteer's first year?
        if programmes.first().date > last_year:
            # Only start counting from their first month
            start = programmes.first().date
        else:
            # Start from a year before the latest record
            start = last_year

        # Total hours between start and end
        programme_hours = 0
        for programme in programmes:
            if programme.date >= start and programme.date <= end and programme.hours:
                programme_hours += programme.hours

        # Average hours per month
        months = max(1, (end - start).days / 30.5)
        average = programme_hours / months

        # Active?
        if average >= 8:
            return True

    return False

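# Worked example of the 'active' arithmetic above with concrete numbers:
# 120 hours logged over roughly a 10-month window averages about 12
# hours/month, which clears the >= 8 hours/month threshold. The dates
# and totals are made up for illustration.
from datetime import date

start = date(2023, 1, 1)
end = date(2023, 11, 1)
programme_hours = 120

months = max(1, (end - start).days / 30.5)   # ~9.97 months
average = programme_hours / months           # ~12.0 hours/month
print(average >= 8)                          # True -> counts as Active
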
def _update_token(token):
    """
        Fitbit module will take care of refreshing tokens and call this
        method when necessary
    """
    print("Updating token")
    # Persist the refreshed token on the matching user record
    user = current.db(current.db.auth_user.username == token['user_id']).select().first()
    if user:
        user.update_record(token=token)
    else:
        print("Token refreshed but user not found!!!")