def _doValidate( self ):
    """Validate the submitted abstract data; on validation errors re-display
    the submission form, otherwise create the abstract, send confirmation
    emails and redirect to the confirmation page."""
    #First, one must validate that the information is fine
    errors = self._abstractData.check()
    if errors:
        # Re-render the submission page with the data entered so far.
        # NOTE(review): unlike sibling variants, "errors" is not put into
        # pars here - confirm the template does not need it.
        p = abstracts.WPAbstractSubmission( self, self._target )
        pars = self._abstractData.toDict()
        pars["action"] = self._action
        pars["attachments"] = []
        return p.display( **pars )
    #Then, we create the abstract object and set its data to the one
    # received
    cfaMgr = self._target.getAbstractMgr()
    abstract = cfaMgr.newAbstract( self._getUser() )
    self._abstractData.setAbstractData(abstract)
    #The commit must be forced before sending the confirmation
    DBMgr.getInstance().commit()
    #Email confirmation about the submission
    mail.Mailer.send( _AbstractSubmissionNotification( abstract ),
                      self._conf.getSupportInfo().getEmail(returnNoReply=True) )
    #Email confirmation about the submission to coordinators
    if cfaMgr.getSubmissionNotification().hasDestination():
        asn=_AbstractSubmissionNotification( abstract )
        asn.setSubject(_("[Indico] New abstract submission: %s")%asn.getDestination().getFullName())
        mail.GenericMailer.send( asn )
    #We must perform some actions: email warning to the authors
    #Finally, we display a confirmation form
    self._redirect( urlHandlers.UHAbstractSubmissionConfirmation.getURL( abstract ) )
def invokeMethod(self, method, params, req): MAX_RETRIES = 10 # clear the context ContextManager.destroy() DBMgr.getInstance().startRequest() # room booking database _startRequestSpecific2RH() # notify components that the request has started self._notify('requestStarted', req) forcedConflicts = Config.getInstance().getForceConflicts() retry = MAX_RETRIES try: while retry > 0: if retry < MAX_RETRIES: # notify components that the request is being retried self._notify('requestRetry', req, MAX_RETRIES - retry) try: # delete all queued emails GenericMailer.flushQueue(False) DBMgr.getInstance().sync() try: result = processRequest(method, copy.deepcopy(params), req) except MaKaC.errors.NoReportError, e: raise NoReportError(e.getMsg()) rh = ContextManager.get('currentRH') # notify components that the request has ended self._notify('requestFinished', req) # Raise a conflict error if enabled. This allows detecting conflict-related issues easily. if retry > (MAX_RETRIES - forcedConflicts): raise ConflictError _endRequestSpecific2RH( True ) DBMgr.getInstance().endRequest(True) GenericMailer.flushQueue(True) # send emails if rh._redisPipeline: try: rh._redisPipeline.execute() except RedisError: Logger.get('redis').exception('Could not execute pipeline') break except ConflictError: _abortSpecific2RH() DBMgr.getInstance().abort() retry -= 1 continue except ClientDisconnected: _abortSpecific2RH() DBMgr.getInstance().abort() retry -= 1 time.sleep(MAX_RETRIES - retry) continue
def _doValidate(self):
    """Validate the submitted abstract data; on validation errors re-display
    the submission form, otherwise create the abstract, send confirmation
    emails and redirect to the confirmation page."""
    # First, one must validate that the information is fine
    errors = self._abstractData.check()
    if errors:
        # Re-render the submission page with the data entered so far.
        # NOTE(review): "errors" is not put into pars here - confirm the
        # template does not need it.
        p = abstracts.WPAbstractSubmission(self, self._target)
        pars = self._abstractData.toDict()
        pars["action"] = self._action
        pars["attachments"] = []
        return p.display(**pars)
    # Then, we create the abstract object and set its data to the one
    # received
    cfaMgr = self._target.getAbstractMgr()
    abstract = cfaMgr.newAbstract(self._getUser())
    self._abstractData.setAbstractData(abstract)
    # The commit must be forced before sending the confirmation
    DBMgr.getInstance().commit()
    # Email confirmation about the submission
    mail.Mailer.send(
        _AbstractSubmissionNotification(abstract),
        self._conf.getSupportInfo().getEmail(returnNoReply=True)
    )
    # Email confirmation about the submission to coordinators
    if cfaMgr.getSubmissionNotification().hasDestination():
        asn = _AbstractSubmissionNotification(abstract)
        asn.setSubject(_("[Indico] New abstract submission: %s") % asn.getDestination().getFullName())
        mail.GenericMailer.send(asn)
    # We must perform some actions: email warning to the authors
    # Finally, we display a confirmation form
    self._redirect(urlHandlers.UHAbstractSubmissionConfirmation.getURL(abstract))
def main(argv):
    """Command-line entry point: list trashed meetings (-s) or restore the
    meeting given by -m from the trash can into the category given by -c,
    attaching a fresh Evaluation to it."""
    category = -1
    meeting = -1
    show = 0
    try:
        opts, args = getopt.getopt(argv, "hm:c:s", ["help", "meeting=", "category=", "show"])
    except getopt.GetoptError:
        usage()
        sys.exit(2)
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            usage()
            sys.exit()
        elif opt in ("-s", "--show"):
            show = 1
        elif opt in ("-m", "--meeting"):
            meeting = arg
        elif opt in ("-c", "--category"):
            category = arg
    #Create database instance and open trashcan manager object
    DBMgr.getInstance().startRequest()
    t = TrashCanManager()
    conf = None
    if (show):
        # List trashed conferences: all of them, or only the requested one.
        for i in t.getList():
            if isinstance(i, Conference):
                if meeting != -1 and i.getId() == meeting:
                    print "[%s]%s" % (i.getId(), i.getTitle())
                elif meeting == -1:
                    print "[%s]%s" % (i.getId(), i.getTitle())
        sys.exit()
    if (meeting != -1 and category != -1):
        print "Meeting:%s" % meeting
        print "Category:%s" % category
        # Locate the requested conference inside the trash can.
        for i in t.getList():
            if isinstance(i, Conference):
                if i.getId() == meeting:
                    conf = i
                    break
    if conf:
        #Remove the meeting from conference
        t.remove(conf)
        #Attach meeting to desired category
        cat = CategoryManager().getById(category)
        ConferenceHolder().add(conf)
        cat._addConference(conf)
        #Add Evaluation
        c = ConferenceHolder().getById(meeting)
        from MaKaC.evaluation import Evaluation
        c.setEvaluations([Evaluation(c)])
    DBMgr.getInstance().endRequest()
def getConfIds():
    """Return the ids of every conference in the ConferenceHolder."""
    dbi = DBMgr.getInstance()
    dbi.startRequest()
    conf_ids = [conf.getId() for conf in ConferenceHolder().getValuesToList()]
    dbi.endRequest()
    return conf_ids
def _doValidate(self):
    """Validate the submitted abstract; on errors re-display the form,
    otherwise build the abstract (title, custom fields, authors/speakers,
    contribution type, tracks, comments), send notification emails and
    redirect to the confirmation page."""
    #First, one must validate that the information is fine
    errors = self._abstractData.check()
    if errors:
        p = abstracts.WPAbstractSubmission(self, self._target)
        pars = self._abstractData.toDict()
        pars["errors"] = errors
        pars["action"] = self._action
        return p.display(**pars)
    #Then, we create the abstract object and set its data to the one
    # received
    cfaMgr = self._target.getAbstractMgr()
    afm = cfaMgr.getAbstractFieldsMgr()
    a = cfaMgr.newAbstract(self._getUser())
    a.setTitle(self._abstractData.title)
    # Copy every configured custom field from the submitted data.
    for f in afm.getFields():
        id = f.getId()
        a.setField(id, self._abstractData.getFieldValue(id))
    # Primary authors (each may additionally be flagged as a speaker).
    for authData in self._abstractData.authors.getPrimaryList():
        auth = a.newPrimaryAuthor(title=authData["auth_title"],
                                  firstName=authData["auth_firstName"],
                                  surName=authData["auth_surName"],
                                  email=authData["auth_email"],
                                  affiliation=authData["auth_affiliation"],
                                  address=authData["auth_address"],
                                  telephone=authData["auth_phone"])
        if authData["auth_speaker"]:
            a.addSpeaker(auth)
    # Co-authors (same speaker handling).
    for authData in self._abstractData.authors.getSecondaryList():
        auth = a.newCoAuthor(title=authData["auth_title"],
                             firstName=authData["auth_firstName"],
                             surName=authData["auth_surName"],
                             email=authData["auth_email"],
                             affiliation=authData["auth_affiliation"],
                             address=authData["auth_address"],
                             telephone=authData["auth_phone"])
        if authData["auth_speaker"]:
            a.addSpeaker(auth)
    a.setContribType(self._abstractData.type)
    for trackId in self._abstractData.tracks:
        track = self._conf.getTrackById(trackId)
        a.addTrack(track)
    a.setComments(self._abstractData.comments)
    #The commit must be forced before sending the confirmation
    DBMgr.getInstance().commit()
    #Email confirmation about the submission
    mail.Mailer.send(_AbstractSubmissionNotification(a))
    #Email confirmation about the submission to coordinators
    if cfaMgr.getSubmissionNotification().hasDestination():
        asn = _AbstractSubmissionNotification(a)
        asn.setSubject(_("[Indico] New abstract submission: %s") %
                       asn.getDestination().getFullName())
        mail.GenericMailer.send(asn)
    #We must perform some actions: email warning to the authors
    #Finally, we display a confirmation form
    self._redirect(urlHandlers.UHAbstractSubmissionConfirmation.getURL(a))
def main(argv):
    """Command-line entry point: list trashed meetings (-s) or restore the
    meeting given by -m from the trash can into the category given by -c,
    attaching a fresh Evaluation to it."""
    category = -1
    meeting = -1
    show = 0
    try:
        opts, args = getopt.getopt(argv, "hm:c:s", ["help", "meeting=", "category=", "show"])
    except getopt.GetoptError:
        usage()
        sys.exit(2)
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            usage()
            sys.exit()
        elif opt in ("-s", "--show"):
            show = 1
        elif opt in ("-m", "--meeting"):
            meeting = arg
        elif opt in ("-c", "--category"):
            category = arg
    # Create database instance and open trashcan manager object
    DBMgr.getInstance().startRequest()
    t = TrashCanManager()
    conf = None
    if show:
        # List trashed conferences: all of them, or only the requested one.
        for i in t.getList():
            if isinstance(i, Conference):
                if meeting != -1 and i.getId() == meeting:
                    print "[%s]%s" % (i.getId(), i.getTitle())
                elif meeting == -1:
                    print "[%s]%s" % (i.getId(), i.getTitle())
        sys.exit()
    if meeting != -1 and category != -1:
        print "Meeting:%s" % meeting
        print "Category:%s" % category
        # Locate the requested conference inside the trash can.
        for i in t.getList():
            if isinstance(i, Conference):
                if i.getId() == meeting:
                    conf = i
                    break
    if conf:
        # Remove the meeting from conference
        t.remove(conf)
        # Attach meeting to desired category
        cat = CategoryManager().getById(category)
        ConferenceHolder().add(conf)
        cat._addConference(conf)
        # Add Evaluation
        c = ConferenceHolder().getById(meeting)
        from MaKaC.evaluation import Evaluation
        c.setEvaluations([Evaluation(c)])
    DBMgr.getInstance().endRequest()
def _prepareDB(self):
    """Rebind the global DBMgr singleton to a fresh instance pointing at the
    currently connected server and keep a handle on it in self._dbi."""
    # since the DBMgr instance will be replicated across objects,
    # we just set it as None for this one.
    # first, store the server address - this wouldn't normally be needed,
    # but the tests won't work otherwise (as the DB is _not_ the default one)
    # NOTE(review): reaches into DBMgr._instance._db.storage._addr internals;
    # fragile against ZODB/DBMgr API changes - confirm before upgrading.
    hostname, port = DBMgr._instance._db.storage._addr
    DBMgr.setInstance(DBMgr(hostname, port))
    self._dbi = DBMgr.getInstance()
def deleteSessions(sm,todelete): done = 0 for key in todelete : sm.delete_session(key) done+=1 try: DBMgr.getInstance().commit() except: DBMgr.getInstance().sync() print "deleted %s sessions" % (len(todelete))
def invokeMethod(self, method, params, req): MAX_RETRIES = 10 # clear the context ContextManager.destroy() DBMgr.getInstance().startRequest() # room booking database _startRequestSpecific2RH() # notify components that the request has started self._notify('requestStarted', req) forcedConflicts = Config.getInstance().getForceConflicts() retry = MAX_RETRIES try: while retry > 0: if retry < MAX_RETRIES: # notify components that the request is being retried self._notify('requestRetry', req, MAX_RETRIES - retry) try: # delete all queued emails GenericMailer.flushQueue(False) DBMgr.getInstance().sync() try: result = processRequest(method, copy.deepcopy(params), req) except MaKaC.errors.NoReportError, e: raise NoReportError(e.getMsg()) # notify components that the request has ended self._notify('requestFinished', req) # Raise a conflict error if enabled. This allows detecting conflict-related issues easily. if retry > (MAX_RETRIES - forcedConflicts): raise ConflictError _endRequestSpecific2RH(True) DBMgr.getInstance().endRequest(True) GenericMailer.flushQueue(True) # send emails break except ConflictError: _abortSpecific2RH() DBMgr.getInstance().abort() retry -= 1 continue except ClientDisconnected: _abortSpecific2RH() DBMgr.getInstance().abort() retry -= 1 time.sleep(MAX_RETRIES - retry) continue
def main():
    """Backfill the 'content' field of every contribution from its
    description, processing conferences in batches of N_CONF per DB request
    with up to 10 retries per batch."""
    DBMgr.getInstance().startRequest()
    ch = ConferenceHolder()
    ids = []
    print "Getting conference IDs..."
    for conf in ch.getList():
        ids.append(conf.getId())
    totalNumberConfs = len(ids)
    DBMgr.getInstance().endRequest()
    print "Updating conferences..."
    i = 0
    # batch size: conferences processed per DB request
    N_CONF = 10
    cErrorList = []
    while ids:
        # Take the next batch of up to N_CONF ids.
        if len(ids) >= N_CONF:
            lids = ids[:N_CONF]
            del ids[:N_CONF]
        else:
            lids = ids
            ids = None
        # Up to 10 attempts for each batch.
        for j in range(10):
            conf = None
            try:
                DBMgr.getInstance().startRequest()
                for id in lids:
                    conf = ch.getById(id)
                    log("check for conference %s: %s/%s" % (conf.getId(), i, totalNumberConfs))
                    for cont in conf.getContributionList():
                        # if not isinstance(cont, AcceptedContribution):
                        if "content" in cont.getFields().keys():
                            if cont.getFields()["content"]:
                                if cont.getFields()["content"] != cont.description:
                                    # Never overwrite a non-empty, diverging field.
                                    log(" contribution %s : content field no empty and diffrent from description" % cont.getId())
                            else:
                                # cont.setField("content",cont.description)
                                cont.getFields()["content"] = cont.description
                                cont._p_changed = 1
                        else:
                            # cont.setField("content",cont.description)
                            cont.getFields()["content"] = cont.description
                            cont._p_changed = 1
                    i += 1
                DBMgr.getInstance().endRequest()
                print "wait 0.5s..."
                sleep(0.5)
                break
            except Exception, e:
                # Transient failure: abort, roll the progress counter back by
                # a full batch, and retry after an increasing delay.
                cErrorList.append(conf)
                i -= N_CONF
                log("error %s, retry %d time(s)" % (e, int(10 - j)))
                sleep(int(j))
                DBMgr.getInstance().abort()
def _doValidate( self ):
    """Validate the submitted abstract; on errors re-display the form,
    otherwise build the abstract (title, custom fields, authors/speakers,
    contribution type, tracks, comments), send notification emails and
    redirect to the confirmation page."""
    #First, one must validate that the information is fine
    errors = self._abstractData.check()
    if errors:
        p = abstracts.WPAbstractSubmission( self, self._target )
        pars = self._abstractData.toDict()
        pars["errors"] = errors
        pars["action"] = self._action
        return p.display( **pars )
    #Then, we create the abstract object and set its data to the one
    # received
    cfaMgr = self._target.getAbstractMgr()
    afm = cfaMgr.getAbstractFieldsMgr()
    a = cfaMgr.newAbstract( self._getUser() )
    a.setTitle( self._abstractData.title )
    # Copy every configured custom field from the submitted data.
    for f in afm.getFields():
        id = f.getId()
        a.setField(id, self._abstractData.getFieldValue(id))
    # Primary authors (each may additionally be flagged as a speaker).
    for authData in self._abstractData.authors.getPrimaryList():
        auth=a.newPrimaryAuthor(title = authData["auth_title"], \
                                firstName = authData["auth_firstName"], \
                                surName = authData["auth_surName"], \
                                email = authData["auth_email"], \
                                affiliation = authData["auth_affiliation"], \
                                address = authData["auth_address"], \
                                telephone = authData["auth_phone"] )
        if authData["auth_speaker"]:
            a.addSpeaker( auth )
    # Co-authors (same speaker handling).
    for authData in self._abstractData.authors.getSecondaryList():
        auth=a.newCoAuthor(title = authData["auth_title"], \
                           firstName = authData["auth_firstName"], \
                           surName = authData["auth_surName"], \
                           email = authData["auth_email"], \
                           affiliation = authData["auth_affiliation"], \
                           address = authData["auth_address"], \
                           telephone = authData["auth_phone"] )
        if authData["auth_speaker"]:
            a.addSpeaker( auth )
    a.setContribType( self._abstractData.type )
    for trackId in self._abstractData.tracks:
        track = self._conf.getTrackById( trackId )
        a.addTrack( track )
    a.setComments(self._abstractData.comments)
    #The commit must be forced before sending the confirmation
    DBMgr.getInstance().commit()
    #Email confirmation about the submission
    mail.Mailer.send( _AbstractSubmissionNotification( a ) )
    #Email confirmation about the submission to coordinators
    if cfaMgr.getSubmissionNotification().hasDestination():
        asn=_AbstractSubmissionNotification( a )
        asn.setSubject(_("[Indico] New abstract submission: %s")%asn.getDestination().getFullName())
        mail.GenericMailer.send( asn )
    #We must perform some actions: email warning to the authors
    #Finally, we display a confirmation form
    self._redirect( urlHandlers.UHAbstractSubmissionConfirmation.getURL( a ) )
def main():
    """Backfill the 'content' field of every contribution from its
    description, processing conferences in batches of N_CONF per DB request
    with up to 10 retries per batch."""
    DBMgr.getInstance().startRequest()
    ch = ConferenceHolder()
    ids = []
    print "Getting conference IDs..."
    for conf in ch.getList():
        ids.append(conf.getId())
    totalNumberConfs = len(ids)
    DBMgr.getInstance().endRequest()
    print "Updating conferences..."
    i = 0
    # batch size: conferences processed per DB request
    N_CONF = 10
    cErrorList = []
    while ids:
        # Take the next batch of up to N_CONF ids.
        if len(ids) >= N_CONF:
            lids = ids[:N_CONF]
            del ids[:N_CONF]
        else:
            lids = ids
            ids = None
        # Up to 10 attempts for each batch.
        for j in range(10):
            conf = None
            try:
                DBMgr.getInstance().startRequest()
                for id in lids:
                    conf = ch.getById(id)
                    log("check for conference %s: %s/%s" % (conf.getId(), i, totalNumberConfs))
                    for cont in conf.getContributionList():
                        #if not isinstance(cont, AcceptedContribution):
                        if "content" in cont.getFields().keys():
                            if cont.getFields()["content"]:
                                if cont.getFields()["content"] != cont.description:
                                    # Never overwrite a non-empty, diverging field.
                                    log(" contribution %s : content field no empty and diffrent from description" % cont.getId())
                            else:
                                #cont.setField("content",cont.description)
                                cont.getFields()["content"] = cont.description
                                cont._p_changed = 1
                        else:
                            #cont.setField("content",cont.description)
                            cont.getFields()["content"] = cont.description
                            cont._p_changed = 1
                    i += 1
                DBMgr.getInstance().endRequest()
                print "wait 0.5s..."
                sleep(0.5)
                break
            except Exception, e:
                # Transient failure: abort, roll the progress counter back by
                # a full batch, and retry after an increasing delay.
                cErrorList.append(conf)
                i -= N_CONF
                log("error %s, retry %d time(s)" % (e, int(10 - j)))
                sleep(int(j))
                DBMgr.getInstance().abort()
def buildCache(ids): i = 1 for id in ids: DBMgr.getInstance().startRequest() try: conf = ConferenceHolder().getById(id) except: print "conf %s not found" continue print i, ":", conf.getId() og = outputGenerator(AccessWrapper()) x = og.confToXML(conf, 1, 1, 1, overrideCache=True) y = og.confToXMLMarc21(conf, 1, 1, 1, overrideCache=True) i += 1 DBMgr.getInstance().endRequest()
def _sendReminderEmail(self, sb):
    """Send a pending-coordinator reminder about *sb*.

    *sb* is either a list of participations (normally the sessions of one
    event), mailed about directly, or a single SessionChair, in which case
    every pending participation registered under its email is included.
    """
    from MaKaC.conference import SessionChair
    # Flush any pending DB changes before mailing.
    DBMgr.getInstance().commit()
    if type(sb) == list:
        mail.GenericMailer.send(_PendingCoordinatorNotification(sb))
    elif isinstance(sb, SessionChair):
        pending = self.getPendingByEmail(sb.getEmail())
        if pending is not None and pending != []:
            mail.GenericMailer.send(_PendingCoordinatorNotification(pending))
def main():
    """
    This script deletes existing category indexes and recreates them.
    """
    dbi = DBMgr.getInstance()
    dbi.startRequest()
    im = indexes.IndexesHolder()
    # Drop the old index so it is rebuilt from scratch.
    im.removeById('categoryDate')
    catIdx = im.getIndex('categoryDate')
    cm = CategoryManager()
    num_categs = len(cm._getIdx())
    cur_num = cur_percent = 0
    for cat in cm._getIdx().itervalues():
        # Index every conference of this category, then commit per category.
        # NOTE(review): the commit/progress statements are assumed to sit at
        # category level (not per conference) - confirm against the original.
        for conf in cat.conferences.itervalues():
            catIdx.indexConf(conf)
        dbi.commit()
        cur_num += 1
        # Print progress only when the integer percentage changes.
        percent = int(float(cur_num) / num_categs * 100)
        if percent != cur_percent:
            cur_percent = percent
            print "{0}%".format(percent)
    dbi.endRequest()
def run(self):
    """Run every active synchronization agent once, acknowledging and
    committing after each successful run.

    NOTE(review): a `return` (not `continue`) aborts all remaining agents as
    soon as one raises - confirm this is intentional.
    """
    sm = SyncManager.getDBInstance()
    logger = self.getLogger()
    # go over all the agents
    for agtName, agent in sm.getAllAgents().iteritems():
        # skip agents if they're not active
        if not agent.isActive():
            logger.warning("Agent '%s' is not active - skipping" % agtName)
            continue
        logger.info("Starting agent '%s'" % agtName)
        try:
            dbi = DBMgr.getInstance()
            # pass the current time and a logger
            result = agent.run(int_timestamp(nowutc()), logger=logger, dbi=dbi)
        except:
            logger.exception("Problem running agent '%s'" % agtName)
            return
        if result:
            logger.info("Acknowledged successful operation")
            agent.acknowledge()
            dbi.commit()
        else:
            logger.info("'Acknowledge' not done - no records?")
        logger.info("Agent '%s' finished" % agtName)
def _stop(args):
    """Ask the scheduler daemon to shut down and poll (up to 20 seconds) for
    confirmation that it stopped.

    :raises Exception: when the daemon is not running and -f was not given.
    """
    _setup(args)
    running = _check_running()
    if not args.force and not running:
        raise Exception("The daemon doesn't seem to be running (consider -f?)")
    dbi = DBMgr.getInstance()
    dbi.startRequest()
    c = Client()
    c.shutdown(msg = "Daemon script")
    dbi.commit()
    print "Waiting for death confirmation... "
    # for/else: "FAILED!" prints only if the daemon never reported stopped.
    for i in range(0, 20):
        if not c.getStatus()['state']:
            break
        else:
            time.sleep(1)
            # Refresh the connection so we see the daemon's state changes.
            dbi.sync()
    else:
        print "FAILED!"
    print "DONE!"
    dbi.endRequest()
def _run(args):
    """Run scheduler task *args.taskid* in the foreground with log output on
    stderr, opening an extra room-booking DB connection when that module is
    active."""
    _setup(args)
    formatter = logging.Formatter(
        "%(asctime)s %(name)s - %(levelname)s %(filename)s:%(lineno)s: %(message)s"
    )
    # Attach a stream handler to the root logger so task logs are visible.
    root = logging.getLogger('')
    handler = logging.StreamHandler()
    handler.setFormatter(formatter)
    root.addHandler(handler)
    dbi = DBMgr.getInstance(max_disconnect_poll=40)
    dbi.startRequest()
    info = HelperMaKaCInfo.getMaKaCInfoInstance()
    useRBDB = info.getRoomBookingModuleActive()
    if useRBDB:
        DALManager.connect()
    sm = SchedulerModule.getDBInstance()
    t = sm.getTaskById(args.taskid)
    # Route the task's own log output through a dedicated console logger.
    t.plugLogger(logging.getLogger('console.run/%s' % args.taskid))
    t.run()
    if useRBDB:
        DALManager.commit()
        DALManager.disconnect()
    dbi.endRequest()
def _show(args):
    """Print scheduler information: its status summary (args.field ==
    "status") or the contents of the operation spool (args.field ==
    "spool")."""
    dbi = DBMgr.getInstance()
    dbi.startRequest()
    c = Client()
    if args.field == "status":
        status = c.getStatus()
        print "Scheduler is currently %s" % \
            ("running" if status['state'] else "NOT running")
        # NOTE(review): line breaks/indentation of this template are
        # reconstructed from a collapsed source - verify the exact layout.
        print """
Spooled commands: %(spooled)s
Tasks:
  - Waiting: %(waiting)s
  - Running: %(running)s
  - Failed: %(failed)s
  - Finished: %(finished)s
""" % status
    elif args.field == "spool":
        for op, obj in c.getSpool():
            if op in ['add', 'del']:
                print "%s %s" % (op, obj)
            else:
                print op
    dbi.endRequest()
def _stop(args): _setup(args) running = _check_running() if not args.force and not running: raise Exception("The daemon doesn't seem to be running (consider -f?)") dbi = DBMgr.getInstance() dbi.startRequest() c = Client() c.shutdown(msg="Daemon script") dbi.commit() print "Waiting for death confirmation... " for i in range(0, 20): if not c.getStatus()['state']: break else: time.sleep(1) dbi.sync() else: print "FAILED!" print "DONE!" dbi.endRequest()
def _run(args):
    """Run scheduler task *args.taskid* in the foreground with log output on
    stderr, opening an extra room-booking DB connection when that module is
    active."""
    _setup(args)
    log_format = "%(asctime)s %(name)s - %(levelname)s %(filename)s:%(lineno)s: %(message)s"
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(logging.Formatter(log_format))
    logging.getLogger('').addHandler(stream_handler)
    dbi = DBMgr.getInstance(max_disconnect_poll=40)
    dbi.startRequest()
    # The room-booking module uses a separate DB connection when active.
    useRBDB = HelperMaKaCInfo.getMaKaCInfoInstance().getRoomBookingModuleActive()
    if useRBDB:
        DALManager.connect()
    task = SchedulerModule.getDBInstance().getTaskById(args.taskid)
    task.plugLogger(logging.getLogger('console.run/%s' % args.taskid))
    task.run()
    if useRBDB:
        DALManager.commit()
        DALManager.disconnect()
    dbi.endRequest()
def main(): dbi = DBMgr.getInstance() dbi.startRequest() ch = ConferenceHolder() totalSize = 0 fNumber = 0 for __, obj in conferenceHolderIterator(ch, verbose=True): for material in obj.getAllMaterialList(): for res in material.getResourceList(): if isinstance(res, LocalFile): try: totalSize += res.getSize() fNumber += 1 except OSError: print "Problems stating size of '%s'" % res.getFilePath( ) dbi.endRequest(False) print "%d files, %d bytes total" % (fNumber, totalSize) print "avg %s bytes/file" % (float(totalSize) / fNumber)
def getInstance( self ):
    """Return the _AdminList singleton stored in the DB root, creating and
    storing a fresh one on first access."""
    root = DBMgr.getInstance().getDBConnection().root()
    try:
        return root["adminlist"]
    except KeyError:
        admin_list = _AdminList()
        root["adminlist"] = admin_list
        return admin_list
def cacheDay(dest, day):
    """ Cache a day, by calling wget in "mirror" mode """
    # Returns a dict mapping confId -> (relative html path, event title).
    dbi = DBMgr.getInstance()
    dbi.startRequest()
    index = {}
    calIdx = IndexesHolder().getIndex('calendar')
    objs = calIdx.getObjectsInDay(day)
    for confId in objs:
        # The calendar index may contain empty ids; skip them.
        if confId == '':
            continue
        obj = ConferenceHolder().getById(confId)
        url = str(urlHandlers.UHConferenceDisplay.getURL(obj))
        # Strip the http:// scheme and split the rest into path components.
        savedDirs = re.match(r'http:\/\/(.*)', url).group(1).split('/')
        print "Calling wget for %s..." % url
        os.system(WGET_COMMAND % (confId, url, os.path.join(dest, confId), savedDirs[0]))
        print "done!"
        index[confId] = (os.path.join(confId,*savedDirs)+'.html', obj.getTitle())
    # endRequest(False): read-only pass, nothing to commit.
    dbi.endRequest(False)
    return index
def getToList():
    """Return the unique, non-empty email addresses of all primary authors
    and speakers of the non-withdrawn contributions of conference "0"."""
    DBMgr.getInstance().startRequest()
    conf = conference.ConferenceHolder().getById("0")
    toList = []
    for contrib in conf.getContributionList():
        # Withdrawn contributions are excluded entirely.
        if isinstance(contrib.getCurrentStatus(), conference.ContribStatusWithdrawn):
            continue
        # Primary authors first, then speakers, preserving encounter order.
        for person in contrib.getPrimaryAuthorList() + contrib.getSpeakerList():
            email = person.getEmail()
            if email.strip() != "" and email not in toList:
                toList.append(email)
    DBMgr.getInstance().endRequest()
    return toList
def cacheDay(dest, day): """ Cache a day, by calling wget in "mirror" mode """ dbi = DBMgr.getInstance() dbi.startRequest() index = {} calIdx = IndexesHolder().getIndex('calendar') objs = calIdx.getObjectsInDay(day) for confId in objs: if confId == '': continue obj = ConferenceHolder().getById(confId) url = str(urlHandlers.UHConferenceDisplay.getURL(obj)) savedDirs = re.match(r'http:\/\/(.*)', url).group(1).split('/') print "Calling wget for %s..." % url os.system(WGET_COMMAND % (confId, url, os.path.join(dest, confId), savedDirs[0])) print "done!" index[confId] = (os.path.join(confId, *savedDirs) + '.html', obj.getTitle()) dbi.endRequest(False) return index
def _event_or_shorturl(confId, shorturl_namespace=False, ovw=False):
    """Resolve *confId* as an event id or a short-url tag and return the
    response (a redirect, or the rendered event display page).

    :param confId: event id or short-url tag taken from the URL
    :param shorturl_namespace: True when the request arrived via the
        short-url namespace
    :param ovw: when True, pass ovw='True' through to the display RH
    :raises NotFound: when neither an event nor a short url matches
    """
    from MaKaC.conference import ConferenceHolder
    from MaKaC.common.url import ShortURLMapper
    with DBMgr.getInstance().global_connection():
        ch = ConferenceHolder()
        su = ShortURLMapper()
        if ch.hasKey(confId):
            # For obvious reasons an event id always comes first.
            # If it's used within the short url namespace we redirect to the event namespace, otherwise
            # we call the RH to display the event
            if shorturl_namespace:
                url = UHConferenceDisplay.getURL(ch.getById(confId))
                func = lambda: redirect(url)
            else:
                params = request.args.to_dict()
                params['confId'] = confId
                if ovw:
                    params['ovw'] = 'True'
                func = lambda: conferenceDisplay.RHConferenceDisplay(None).process(params)
        elif (shorturl_namespace or app.config['INDICO_COMPAT_ROUTES']) and su.hasKey(confId):
            if shorturl_namespace:
                # Correct namespace => redirect to the event
                url = UHConferenceDisplay.getURL(su.getById(confId))
                func = lambda: redirect(url)
            else:
                # Old event namespace => 301-redirect to the new shorturl first to get Google etc. to update it
                url = url_for('.shorturl', confId=confId)
                func = lambda: redirect(url, 301)
        else:
            raise NotFound(
                _('The specified event with id or tag "%s" does not exist or has been deleted') % confId)
        return func()
def getIdx(cls, indexName, db=None):
    """Return the catalog index named *indexName* from *db* (defaulting to
    the current DB connection), bootstrapping the catalog on first use.
    Returns None when the index does not exist."""
    if not db:
        db = DBMgr.getInstance().getDBConnection()
    # Create the catalog structure the first time it is accessed.
    if 'catalog' not in db.root():
        cls.initialize(db=db)
    return db.root()['catalog'].get(indexName)
def getSessionManager(debug=0):
    """Return the main session manager stored in the DB root, creating and
    storing a new PSessionManager (inside a fresh OOBTree bucket) on first
    access.

    :param debug: unused; kept for backward compatibility.
    """
    root = DBMgr.getInstance().getDBConnection().root()
    try:
        sm = root["SessionManagers"]["main"]
    except KeyError:
        sm = PSessionManager()
        root["SessionManagers"] = OOBTree.OOBTree()
        root["SessionManagers"]["main"] = sm
    # Fix: the original fell off the end and implicitly returned None.
    return sm
def getRepository():
    """Return the main local file repository stored in the DB root, creating
    and storing a new MaterialLocalRepository (inside a fresh OOBTree
    bucket) on first access."""
    dbRoot = DBMgr.getInstance().getDBConnection().root()
    try:
        fr = dbRoot["local_repositories"]["main"]
    except KeyError:
        fr = fileRepository.MaterialLocalRepository()
        dbRoot["local_repositories"] = OOBTree.OOBTree()
        dbRoot["local_repositories"]["main"] = fr
    # Fix: the original fell off the end and implicitly returned None.
    return fr
def updateStatistics(cls, cat, logger=None):
    """Recompute the statistics of category *cat* and commit the result.

    :param cat: category whose statistics are recalculated
    :param logger: optional logger; a completion message is emitted when set
    """
    dbi = DBMgr.getInstance()
    cls._updateStatistics(cat, dbi, 0, logger)
    if logger:
        logger.info("Statistics calculation finished")
    dbi.commit()
def _getConfRegistry( self ):
    """Return the cached web-factory registry, loading it from the DB root
    (and creating the OOBTree bucket on first ever access) when not yet
    cached on this instance."""
    if self._confRegistry:
        return self._confRegistry
    db_root = DBMgr.getInstance().getDBConnection().root()
    if db_root.has_key( "webfactoryregistry" ):
        self._confRegistry = db_root["webfactoryregistry"]
    else:
        registry = OOBTree.OOBTree()
        db_root["webfactoryregistry"] = registry
        self._confRegistry = registry
    return self._confRegistry
def _check_running():
    """Return the scheduler daemon's running state as reported by a status
    query over a short-lived DB request."""
    dbi = DBMgr.getInstance()
    dbi.startRequest()
    state = Client().getStatus()['state']
    dbi.endRequest()
    return state
def _process( self ): ih = AuthenticatorMgr() #first, check if login is free if not ih.isLoginFree(self._login): self._redirect(self._fromURL + "&msg=Login not avaible") return #then, check if password is OK if self._pwd != self._pwdBis: self._redirect(self._fromURL + "&msg=You must enter the same password twice") return #create the indentity li = user.LoginInfo( self._login, self._pwd ) id = ih.createIdentity( li, self._avatar, self._system ) ih.add( id ) #commit and if OK, send activation mail DBMgr.getInstance().commit() scr = mail.sendConfirmationRequest(self._avatar) scr.send() self._redirect( urlHandlers.UHUserDetails.getURL( self._avatar ) ) #to set to the returnURL
def getDBInstance(cls):
    """ Returns the instance of SyncManager currently in the DB """
    storage = getPluginType().getStorage()
    if 'agent_manager' in storage:
        return storage['agent_manager']
    # First use: create the DB structures, then return the freshly created
    # manager. Fix: the original returned None on this path.
    # NOTE(review): assumes updateDBStructures() populates
    # storage['agent_manager'] - confirm against its implementation.
    root = DBMgr.getInstance().getDBConnection()
    updateDBStructures(root)
    return storage['agent_manager']
def _getInternalPagesMgrRegistery( self ):
    """Return the cached internal-pages registry, loading it from the DB
    root (and creating the OOBTree bucket on first ever access) when not
    yet cached on this instance."""
    if self._pagesMgrRegistery:
        return self._pagesMgrRegistery
    db_root = DBMgr.getInstance().getDBConnection().root()
    if db_root.has_key( "internalPagesRegistery" ):
        self._pagesMgrRegistery = db_root["internalPagesRegistery"]
    else:
        registry = OOBTree.OOBTree()
        db_root["internalPagesRegistery"] = registry
        self._pagesMgrRegistery = registry
    return self._pagesMgrRegistery
def changeCreator(oldUser, newUser):
    """Reassign every room-booking reservation created by *oldUser* to
    *newUser* and move the entries in the per-user reservation index.

    Both arguments are avatar ids. Prints diagnostics and the number of
    reservations moved.

    NOTE(review): the early returns below exit without closing the DB
    request / DAL connection opened above - confirm this is acceptable for
    a one-shot script.
    """
    dbi = DBMgr.getInstance()
    dbi.startRequest()
    Factory.getDALManager().connect()
    # check if the users exist
    if AvatarHolder().getById(oldUser) is None:
        print "There is no user with id %s" % oldUser
        return
    if AvatarHolder().getById(newUser) is None:
        print "There is no user with id %s" % newUser
        return
    # Query by example: all reservations created by the old user.
    resvEx = ReservationBase()
    resvEx.createdBy = oldUser
    allResv4OldUser = CrossLocationQueries.getReservations(resvExample=resvEx)
    if allResv4OldUser == []:
        print "No reservations for user %s" % oldUser
        return
    # resvs = ReservationBase.getReservations()
    # allResv4OldUser = [x for x in allResv if x.createdBy == oldUser]
    # The query may return a single object instead of a list; normalize.
    if type(allResv4OldUser) is not list:
        allResv4OldUser = [allResv4OldUser]
    # Modify reservations
    for r in allResv4OldUser:
        r.createdBy = newUser
        #print r.createdBy, r.id
    # Update index
    userReservationsIndexBTree = Reservation.getUserReservationsIndexRoot()
    newUserResvs = userReservationsIndexBTree.get(newUser)
    if newUserResvs == None:
        newUserResvs = []  # New list of reservations for this room
        userReservationsIndexBTree.insert(newUser, newUserResvs)
    newUserResvs.extend(allResv4OldUser)
    # Store a copy so the BTree persists the updated list.
    userReservationsIndexBTree[newUser] = newUserResvs[:]
    if userReservationsIndexBTree.has_key(oldUser):
        userReservationsIndexBTree.pop(oldUser)
    userReservationsIndexBTree._p_changed = 1
    # close DB connection
    Factory.getDALManager().commit()
    Factory.getDALManager().disconnect()
    dbi.endRequest()
    print "%s reservations have moved from creator %s to creator %s" % (
        len(allResv4OldUser), oldUser, newUser)
def _cmd(args): dbi = DBMgr.getInstance() dbi.startRequest() c = Client() if args.command == "clear_spool": print "%s operations removed" % c.clearSpool() dbi.endRequest()
def changeCreator(oldUser, newUser):
    """Reassign every room-booking reservation created by *oldUser* to
    *newUser* and move the entries in the per-user reservation index.

    Both arguments are avatar ids. Prints diagnostics and the number of
    reservations moved.

    NOTE(review): the early returns below exit without closing the DB
    request / DAL connection opened above - confirm this is acceptable for
    a one-shot script.
    """
    dbi = DBMgr.getInstance()
    dbi.startRequest()
    Factory.getDALManager().connect()
    # check if the users exist
    if AvatarHolder().getById(oldUser) is None:
        print "There is no user with id %s"%oldUser
        return
    if AvatarHolder().getById(newUser) is None:
        print "There is no user with id %s"%newUser
        return
    # Query by example: all reservations created by the old user.
    resvEx = ReservationBase()
    resvEx.createdBy = oldUser
    allResv4OldUser = CrossLocationQueries.getReservations( resvExample = resvEx)
    if allResv4OldUser == []:
        print "No reservations for user %s"%oldUser
        return
    # resvs = ReservationBase.getReservations()
    # allResv4OldUser = [x for x in allResv if x.createdBy == oldUser]
    # The query may return a single object instead of a list; normalize.
    if type(allResv4OldUser) is not list:
        allResv4OldUser = [allResv4OldUser]
    # Modify reservations
    for r in allResv4OldUser:
        r.createdBy = newUser
        #print r.createdBy, r.id
    # Update index
    userReservationsIndexBTree = Reservation.getUserReservationsIndexRoot()
    newUserResvs = userReservationsIndexBTree.get( newUser )
    if newUserResvs == None:
        newUserResvs = []  # New list of reservations for this room
        userReservationsIndexBTree.insert( newUser, newUserResvs )
    newUserResvs.extend( allResv4OldUser )
    # Store a copy so the BTree persists the updated list.
    userReservationsIndexBTree[newUser] = newUserResvs[:]
    if userReservationsIndexBTree.has_key(oldUser):
        userReservationsIndexBTree.pop(oldUser)
    userReservationsIndexBTree._p_changed = 1
    # close DB connection
    Factory.getDALManager().commit()
    Factory.getDALManager().disconnect()
    dbi.endRequest()
    print "%s reservations have moved from creator %s to creator %s" % (len(allResv4OldUser), oldUser, newUser)
def buildCache(ids): i = 1 for id in ids: DBMgr.getInstance().startRequest() try: conf = ConferenceHolder().getById(id) except: print "conf %s not found" continue j = 1 for cont in conf.getContributionList(): print "conf %d:%s - contrib %d:%s"%(i, conf.getId(), j, cont.getId()) og = outputGenerator(AccessWrapper()) x = og.contribToXMLMarc21(cont, 1, overrideCache=True) for subCont in cont.getSubContributionList(): print "conf %d:%s - contrib %d:%s - subContrib:%s"%(i, conf.getId(), j, cont.getId(), subCont.getId()) y = og.subContribToXMLMarc21(subCont, 1, overrideCache=True) j += 1 i += 1 DBMgr.getInstance().endRequest()
def getToList(): toList = [] DBMgr.getInstance().startRequest() ch = conference.ConferenceHolder() c = ch.getById("0") toList = [] i = 0 for contrib in c.getContributionList(): if contrib.getPaper() == None or contrib.getPaper().getResourceList() == []: if not isinstance(contrib.getCurrentStatus(), conference.ContribStatusWithdrawn): i += 1 for pa in contrib.getPrimaryAuthorList(): if pa.getEmail().strip() != "" and (not pa.getEmail() in toList): toList.append(pa.getEmail()) for spk in contrib.getSpeakerList(): if spk.getEmail().strip() != "" and (not spk.getEmail() in toList): toList.append(spk.getEmail()) DBMgr.getInstance().endRequest() print "Number of contribs without papers:%s"%i return toList
def sync(confId):
    """Re-synchronize the contributions of the accepted abstracts of
    conference *confId* with their abstract data (title, content/summary
    fields, track, type, authors, speakers, submitter) and print the ids of
    the updated contributions.

    :raises Exception: when the conference cannot be fetched.
    """
    DBMgr.getInstance().startRequest()
    conf = ConferenceHolder().getById(confId)
    counter = []
    if conf is None:
        raise Exception("Error fetching conference")
    else:
        for abstract in conf.getAbstractMgr().getAbstractList():
            # Only accepted abstracts have an associated contribution.
            if isinstance(abstract.getCurrentStatus(), AbstractStatusAccepted):
                contrib = abstract.getContribution()
                contrib.setTitle(abstract.getTitle())
                contrib.setDescription(abstract.getField('content'))
                contrib.setField('summary', abstract.getField('summary'))
                contrib.setTrack(abstract.getCurrentStatus().getTrack())
                contrib.setType(abstract.getCurrentStatus().getType())
                # Drop all existing people before rebuilding them from the
                # abstract (iterate over copies while mutating the lists).
                for auth1 in contrib.getPrimaryAuthorList()[:]:
                    contrib.removePrimaryAuthor(auth1)
                for auth2 in contrib.getCoAuthorList()[:]:
                    contrib.removeCoAuthor(auth2)
                for auth3 in contrib.getSpeakerList()[:]:
                    contrib.removeSpeaker(auth3)
                for auth in abstract.getAuthorList():
                    c_auth = ContributionParticipation()
                    contrib._setAuthorValuesFromAbstract(c_auth, auth)
                    if abstract.isPrimaryAuthor(auth):
                        contrib.addPrimaryAuthor(c_auth)
                    else:
                        contrib.addCoAuthor(c_auth)
                    if abstract.isSpeaker(auth):
                        contrib.addSpeaker(c_auth)
                # TODO: remove the previous submitter...how???
                submitter = contrib.getAbstract().getSubmitter().getUser()
                contrib._grantSubmission(submitter)
                counter.append(contrib.getId())
    DBMgr.getInstance().endRequest()
    print "%s contributions synchronized (%s)" % (len(counter), ', '.join(counter))