def setMembershipTime(session, groupName, memberName, enable_str=None, expire_str=None):
    """Set the enable/expiration window for one member of a grouper group.

    :param session: The grouper session
    :param groupName: The fully qualified group name
    :param memberName: The fully qualified member name
    :param enable_str: The enable date/time in yyyy-mm-dd HH:MM:SS format
    :param expire_str: The expiration date/time in yyyy-mm-dd HH:MM:SS format
    :return: True when the member was found and updated, False otherwise
    """
    fmt = SimpleDateFormat("yyyy-MM-dd HH:mm:ss")

    def to_millis(text):
        # None passes through untouched; otherwise parse to epoch millis.
        if text is None:
            return None
        return fmt.parse(text).getTime()

    enable_millis = to_millis(enable_str)
    expire_millis = to_millis(expire_str)

    group = jython_grouper.getGroup(session, groupName)
    for membership in group.memberships.toArray():
        if membership.member.name == memberName:
            membership.enabledTimeDb = enable_millis
            membership.disabledTimeDb = expire_millis
            membership.update()
            return True
    return False
def flowFileDates(hmsRunName):
    # will not be accurate if there is more than one set of data--specify start date in config file?
    """Find the dates of any FLOW files in the DSS catalog.

    :param hmsRunName: HMS run name; matched (upper-cased) against the
        RUN: part of each cataloged pathname.
    :return: list of unique "ddMMMyyyy" date strings (upper-cased, sorted).
    Relies on module-level `dss` (an open DSS file).
    """
    from java.text import SimpleDateFormat
    dateFormat = SimpleDateFormat("ddMMMyyyy")
    print("Getting dates from " + hmsRunName + "...")
    dates = []
    # Pathname parts: /A/B/C(FLOW)/D(date)/E/F(RUN:name)/
    flowFiles = filter(
        lambda f: ((f.split('/')[3] == 'FLOW') and
                   (f.split('/')[6] == ('RUN:' + hmsRunName.upper()))),
        dss.getCatalogedPathnames())
    candidateDates = map(lambda x: x.split('/')[4], flowFiles)
    for d in candidateDates:
        # Only accept well-formed ddMMMyyyy strings.
        if d[0:2].isdigit() and d[2:5].isalpha() and d[5:9].isdigit():
            date = dateFormat.parse(d)
            # BUG FIX: any() takes a single iterable -- the previous call
            # any(lambda x: x.equals(date), dates) raised TypeError.
            dateAlreadyFound = any(x.equals(date) for x in dates)
            if not dateAlreadyFound:
                dates.append(date)
    dates.sort(lambda a, b: a.compareTo(b))
    return map(lambda d: dateFormat.format(d).upper(), dates)
def setupjob(job, args):
    """
    Set up a job to run on telemetry date ranges using data from HBase.

    Telemetry jobs expect two arguments, startdate and enddate, both in
    yyyymmdd format (parsed with the module-level `dateformat` pattern).
    """
    import java.text.SimpleDateFormat as SimpleDateFormat
    import java.util.Calendar as Calendar
    import com.mozilla.hadoop.hbase.mapreduce.MultiScanTableMapReduceUtil as MSTMRU
    import com.mozilla.util.Pair

    if len(args) != 2:
        raise Exception("Usage: <startdate-YYYYMMDD> <enddate-YYYYMMDD>")

    parser = SimpleDateFormat(dateformat)
    start_cal = Calendar.getInstance()
    start_cal.setTime(parser.parse(args[0]))
    end_cal = Calendar.getInstance()
    end_cal.setTime(parser.parse(args[1]))

    wanted_columns = [com.mozilla.util.Pair('data', 'json')]
    scans = MSTMRU.generateBytePrefixScans(start_cal, end_cal, dateformat,
                                           wanted_columns, 500, False)
    MSTMRU.initMultiScanTableMapperJob('telemetry', scans, None, None, None, job)
    # inform HadoopDriver about the columns we expect to receive
    job.getConfiguration().set("org.mozilla.jydoop.hbasecolumns", "data:json")
def __newDoc(self):
    """Initialise solr-index fields for the current object/payload pair
    (Fascinator-style harvest rules script)."""
    self.oid = self.object.getId()
    self.pid = self.payload.getId()
    metadataPid = self.params.getProperty("metaPid", "DC")
    self.utils.add(self.index, "storage_id", self.oid)
    # The metadata payload indexes the whole object; any other payload is a
    # datastream whose id becomes "<object-id>/<payload-id>".
    if self.pid == metadataPid:
        self.itemType = "object"
    else:
        self.oid += "/" + self.pid
        self.itemType = "datastream"
    self.utils.add(self.index, "identifier", self.pid)
    self.utils.add(self.index, "id", self.oid)
    self.utils.add(self.index, "item_type", self.itemType)
    self.utils.add(self.index, "last_modified", self.last_modified)
    self.utils.add(self.index, "harvest_config", self.params.getProperty("jsonConfigOid"))
    self.utils.add(self.index, "harvest_rules", self.params.getProperty("rulesOid"))
    self.item_security = []
    self.owner = self.params.getProperty("owner", "guest")
    formatter = SimpleDateFormat('yyyyMMddHHmmss')
    self.params.setProperty("last_modified", formatter.format(Date()))
    self.utils.add(self.index, "date_object_created", self.params.getProperty("date_object_created"))
    # NOTE(review): formatted from localtime but suffixed "Z" -- this looks
    # like it assumes the host clock is UTC; confirm.
    self.params.setProperty(
        "date_object_modified",
        time.strftime("%Y-%m-%dT%H:%M:%SZ", time.localtime()))
    self.utils.add(self.index, "date_object_modified", self.params.getProperty("date_object_modified"))
def saveOrUpdatePrepareCourse(self):
    """Create a new prepare-course stage from request parameters and report
    the result back through the printer."""
    stageComment = self.params.safeGetStringParam("stageComment")
    if stageComment == None or stageComment == "":
        self.printer.addMessage(u"请输入讨论内容。")
        return self.printer.printMessage(
            "manage/course/createPreCourse3.py?prepareCourseId=" +
            str(self.prepareCourseId), "")
    # BUG FIX: these request parameters were used below but never read,
    # so every call past the guard raised NameError. Parameter names mirror
    # the sibling editPrepareCourseStage handler.
    stageTitle = self.params.safeGetStringParam("stageTitle")
    stageStartDate = self.params.safeGetStringParam("stageStartDate")
    stageEndDate = self.params.safeGetStringParam("stageEndDate")
    stageDescription = self.params.safeGetStringParam("stageDescription")
    pcStageStartDate = SimpleDateFormat("yyyy-M-d").parse(stageStartDate)
    pcEndDateTime = SimpleDateFormat("yyyy-M-d").parse(stageEndDate)
    # New stage is appended after the current last one.
    maxOrder = self.pc_svc.getMaxCourseStageOrderIndex(self.prepareCourseId)
    prepareCourseStage = PrepareCourseStage()
    prepareCourseStage.setPrepareCourseId(self.prepareCourseId)
    prepareCourseStage.setTitle(stageTitle)
    prepareCourseStage.setBeginDate(pcStageStartDate)
    prepareCourseStage.setFinishDate(pcEndDateTime)
    prepareCourseStage.setDescription(stageDescription)
    prepareCourseStage.setOrderIndex(maxOrder + 1)
    self.pc_svc.createPrepareCourseStage(prepareCourseStage)
    self.printer.addMessage(u"创建 " + prepareCourseStage.title + u" 阶段成功。")
    return self.printer.printMessage(
        "manage/course/createPreCourse3.py?prepareCourseId=" +
        str(self.prepareCourseId), "")
def processCertInfo():
    """Scan logs/expiringCerts.txt for certificates expiring within the alert
    window and write matches to logs/cert.log.

    Relies on module-level globals loaded from the properties file:
    alertDays, keystoresList, certAliasExcptList -- assumed defined.
    """
    certInfo = {}
    dtFormat = SimpleDateFormat("E MMM dd HH:mm:ss yyyy")
    logFile = open("logs/cert.log", "w")
    try:
        for line in open('logs/expiringCerts.txt'):
            if 'App Stripe' in line:
                certInfo['appStripe'] = line.split('=')[1].strip()
            if 'Keystore' in line:
                certInfo['keystore'] = line.split('=')[1].strip()
            if 'Alias' in line:
                certInfo['alias'] = line.split('=')[1].strip()
            if 'Certificate status' in line:
                certInfo['status'] = line.split('=')[1].strip()
            if 'Expiration Date' in line:
                # Expiration Date record marks the end of certificate info.
                # Below block processes the current cert.
                certInfo['expiryDt'] = line.split('=')[1].strip()
                # Removes UTC from date string to build date object.
                expiryDtFmtd = str(certInfo['expiryDt']).replace("UTC ", "")
                expiryDtObj = dtFormat.parse(expiryDtFmtd)
                currDtObj = Date()  # Get current date.
                timeDiff = expiryDtObj.getTime() - currDtObj.getTime()
                daysBetween = (timeDiff / (1000 * 60 * 60 * 24))
                # ROBUSTNESS FIX: .get() skips malformed records (missing
                # Keystore/Alias lines) instead of raising KeyError.
                # Only concerned about keystores mentioned in the properties file.
                if daysBetween >= 0 and daysBetween <= int(alertDays) \
                        and certInfo.get('keystore') in keystoresList \
                        and certInfo.get('alias') not in certAliasExcptList:
                    logFile.write("Certificate in app stripe \"" + certInfo['appStripe'] +
                                  "\" and keystore \"" + certInfo['keystore'] +
                                  "\" with alias name \"" + certInfo['alias'] +
                                  "\" is expiring on " + certInfo['expiryDt'] + "\n")
                    logFile.write("\n")
                certInfo = {}
    finally:
        # BUG FIX: close the output log even when parsing raises
        # (previously leaked on any exception).
        logFile.close()
def __newDoc(self):
    """Initialise solr-index fields for the current object/payload pair
    (Fascinator-style harvest rules script)."""
    self.oid = self.object.getId()
    self.pid = self.payload.getId()
    metadataPid = self.params.getProperty("metaPid", "DC")
    self.utils.add(self.index, "storage_id", self.oid)
    # The metadata payload indexes the whole object; any other payload is a
    # datastream whose id becomes "<object-id>/<payload-id>".
    if self.pid == metadataPid:
        self.itemType = "object"
    else:
        self.oid += "/" + self.pid
        self.itemType = "datastream"
    self.utils.add(self.index, "identifier", self.pid)
    self.utils.add(self.index, "id", self.oid)
    self.utils.add(self.index, "item_type", self.itemType)
    self.utils.add(self.index, "last_modified", self.last_modified)
    self.utils.add(self.index, "harvest_config", self.params.getProperty("jsonConfigOid"))
    self.utils.add(self.index, "harvest_rules", self.params.getProperty("rulesOid"))
    self.item_security = []
    self.owner = self.params.getProperty("owner", "guest")
    formatter = SimpleDateFormat('yyyyMMddHHmmss')
    self.params.setProperty("last_modified", formatter.format(Date()))
    self.utils.add(self.index, "date_object_created", self.params.getProperty("date_object_created"))
    # NOTE(review): formatted from localtime but suffixed "Z" -- this looks
    # like it assumes the host clock is UTC; confirm.
    self.params.setProperty("date_object_modified",
                            time.strftime("%Y-%m-%dT%H:%M:%SZ", time.localtime()))
    self.utils.add(self.index, "date_object_modified", self.params.getProperty("date_object_modified"))
def hbase_setupjob(job, args):
    """
    Set up a job to run on telemetry date ranges using data from HBase.

    Telemetry jobs expect two arguments, startdate and enddate, both in
    yyyymmdd format (parsed with the module-level `dateformat` pattern).
    """
    import java.text.SimpleDateFormat as SimpleDateFormat
    import java.util.Calendar as Calendar
    import com.mozilla.hadoop.hbase.mapreduce.MultiScanTableMapReduceUtil as MSTMRU
    import com.mozilla.util.Pair

    if len(args) != 2:
        raise Exception("Usage: <startdate-YYYYMMDD> <enddate-YYYYMMDD>")

    date_parser = SimpleDateFormat(dateformat)
    begin = Calendar.getInstance()
    begin.setTime(date_parser.parse(args[0]))
    end = Calendar.getInstance()
    end.setTime(date_parser.parse(args[1]))

    scan_columns = [com.mozilla.util.Pair('data', 'json')]
    scans = MSTMRU.generateBytePrefixScans(begin, end, dateformat,
                                           scan_columns, 500, False)
    MSTMRU.initMultiScanTableMapperJob('telemetry', scans,
                                       None, None, None, job)
    # inform HadoopDriver about the columns we expect to receive
    job.getConfiguration().set("org.mozilla.jydoop.hbasecolumns", "data:json")
def __init__(self, selectFields):
    """Prepare-course list query: pulls filter parameters off the request,
    mirrors them back as request attributes for the view, and stamps the
    current time for date-window comparisons."""
    BaseQuery.__init__(self, selectFields)
    self.params = ParamUtil(request)
    self.prepareCourseId = None
    self.createUserId = None
    self.k = self.params.getStringParam("k")  # search keyword
    self.ktype = self.params.getStringParam("ktype")  # keyword type (which field the keyword matches)
    if self.ktype == None:
        self.ktype = "1"  # default: keyword searches the title
    self.unit = self.params.getStringParam("unit")  # organisation of the lead preparer
    self.course_BeginDate = self.params.getStringParam("course_BeginDate")
    self.course_EndDate = self.params.getStringParam("course_EndDate")
    self.subjectId = self.params.getIntParamZeroAsNull("subjectId")
    self.gradeId = self.params.getIntParamZeroAsNull("gradeId")
    # Echo the filter values back so the page can re-render them.
    request.setAttribute("subjectId", self.subjectId)
    request.setAttribute("gradeId", self.gradeId)
    request.setAttribute("k", self.k)
    request.setAttribute("ktype", self.ktype)
    request.setAttribute("unit", self.unit)
    request.setAttribute("course_BeginDate", self.course_BeginDate)
    request.setAttribute("course_EndDate", self.course_EndDate)
    self.orderType = 0
    self.status = None
    # Stage of the prep session: running / finished / will (not yet
    # started) / recommend (recommended).
    self.stage = None
    self.containChild = None  # exact-subject query flag
    self.prepareCoursePlanId = None
    self.prepareCourseGenerated = True
    self.custormAndWhere = None  # custom user-supplied where condition
    sft = SimpleDateFormat("yyyy-MM-dd HH:mm:ss")
    self.nowDate = sft.format(Date())
def create_sale(self, sale):
    """Insert (or replace) a sale row and decrement product stock.

    :param sale: dict with keys 'person', 'value', 'description'
        (newline-separated "name:quantity" pairs) and 'paid'.
    """
    values = ContentValues()
    values.put("person", sale['person'].lower())
    values.put("total", sale['value'])
    values.put("description", sale['description'].replace('\n', ' ').lower())
    values.put("paid", sale['paid'])
    calendar = Calendar.getInstance()
    dateformat = SimpleDateFormat('yyyy/MM/dd HH:mm')
    now = dateformat.format(calendar.getTime())
    values.put("date", now)
    db = self.getWritableDatabase()
    try:
        db.insertWithOnConflict("sale", None, values,
                                SQLiteDatabase.CONFLICT_REPLACE)
        # remove quantity from products
        products = sale['description'].split('\n')
        for product in products:
            name, quantity = product.split(':')
            quantity = int(quantity)
            # SECURITY FIX: bind arguments instead of %-interpolating into
            # the SQL -- a product name containing a quote previously broke
            # the statement / allowed injection.
            db.execSQL(
                "UPDATE product SET quantity = quantity - ? WHERE name=?",
                [quantity, name])
    finally:
        # BUG FIX: close the handle even when an insert/update raises.
        db.close()
def stringToDate(value):
    """Parse a "dd MMM yyyy HH:mm" string into a java.util.Date.

    Information about how to build your own date pattern is here:
    http://java.sun.com/j2se/1.5.0/docs/api/java/text/SimpleDateFormat.html
    """
    return SimpleDateFormat("dd MMM yyyy HH:mm").parse(value)
def __activate__(self, context):
    """Entry point for the dashboard-report page: validates access, defaults
    the date range to the current calendar year, and dispatches to export
    or build."""
    self.auth = context["page"].authentication
    self.errorMsg = ""
    self.request = context["request"]
    self.response = context["response"]
    self.fromDtTxt = self.request.getParameter("from")
    self.toDtTxt = self.request.getParameter("to")
    self.reportName = self.request.getParameter("reportName")
    self.dateFormatter = SimpleDateFormat("d/M/yyyy")
    self.systemConfig = context["systemConfig"]
    # Default range: 1 Jan of this year through today.
    if (self.fromDtTxt is None or self.toDtTxt is None):
        curCal = Calendar.getInstance()
        self.fromDtTxt = "1/1/%s" % curCal.get(Calendar.YEAR)
        # Calendar.MONTH is zero-based, hence the +1.
        self.toDtTxt = "%s/%s/%s" % (curCal.get(Calendar.DAY_OF_MONTH),
                                     curCal.get(Calendar.MONTH) + 1,
                                     curCal.get(Calendar.YEAR))
    if (self.reportName is None):
        self.reportName = "Dashboard Report"
    if (self.auth.is_logged_in()):
        if (self.auth.is_admin() == True):
            self.action = self.request.getParameter("action")
            if self.action == "export":
                self.exportDashboard(context)
            else:
                self.buildDashboard(context)
        else:
            self.errorMsg = "Requires Admin / Librarian / Reviewer access."
    else:
        self.errorMsg = "Please login."
def change_view(self):
    """Reset the layout to show only the result label, filled with today's
    date ("yyyy / MM / dd")."""
    self.vlayout.removeAllViews()
    self.vlayout.addView(self.text_result)
    stamp = SimpleDateFormat('yyyy / MM / dd').format(
        Calendar.getInstance().getTime())
    self.text_result.setText(stamp)
def process_record(self):
    """Create a ServiceNow record from the current task variables.

    Copies selected task variables into a content dict, converts the due
    date to the SN "MM-dd-yyyy HH:mm:ss" format, attaches release
    identifiers, then posts via self.sn_client. Returns the client response.
    Relies on script-level globals: release, logger, getCurrentTask.
    """
    content = {}
    self.set_from_task_vars('u_request', content)
    self.set_from_task_vars('u_application_name', content)
    self.set_from_task_vars('cmdb_ci', content)
    self.set_from_task_vars('priority', content)
    self.set_from_task_vars('state', content)
    self.set_from_task_vars('assignment_group', content)
    self.set_from_task_vars('assigned_to', content)
    self.set_from_task_vars('due_date', content)
    self.set_from_task_vars('short_description', content)
    self.set_from_task_vars('description', content)
    # Dates need to be converted
    sdf = SimpleDateFormat("MM-dd-yyyy HH:mm:ss")
    content['due_date'] = sdf.format(self.task_vars['due_date'])
    # Also sending release info.
    content['x_xlbv_xl_release_identifier'] = str(release.id)
    content['x_xlbv_xl_release_state'] = str(release.status)
    logger.debug('process_record : send create_record request...')
    logger.debug('process_record : table name: ' + self.table_name)
    logger.debug('process_record : content... ')
    logger.debug(content)
    response = self.sn_client.create_record(self.table_name, content,
                                            getCurrentTask().getId())
    logger.debug('process_record : response...')
    logger.debug(response)
    return response
def saveOrUpdatePrepareCourse(self):
    """Create a new prepare-course, or update an existing one.

    Only the system admin or the creator/lead preparer may update an
    existing record; validation failures short-circuit with a printed
    error message.
    """
    if self.prepareCourse == None:
        self.prepareCourse = PrepareCourse()
        self.prepareCourse.setPrepareCoursePlanId(self.prepareCoursePlan.prepareCoursePlanId)
    else:
        # Only admin and the creator/lead preparer may modify.
        if not (self.accessControlService.isSystemAdmin(self.loginUser)
                or self.loginUser.userId == self.prepareCourse.createUserId):
            self.printer.msg = u"只有 admin 或者创建人、主备人才能进行修改。<br/><br/><a href='createPreCourse.py'>返回</a>"
            return self.printer.printError()
    pcTitle = self.params.safeGetStringParam("pcTitle")
    pcStartDate = self.params.safeGetStringParam("pcStartDate")
    pcEndDate = self.params.safeGetStringParam("pcEndDate")
    pcGradeId = self.params.getIntParamZeroAsNull("pcGrade")
    pcMetaSubjectId = self.params.getIntParamZeroAsNull("pcMetaSubject")
    pcDescription = self.params.safeGetStringParam("pcDescription")
    pcLeader = self.params.safeGetIntParam("pcLeader")
    pcTags = self.params.safeGetStringParam("pcTags")
    # Leader defaults to the logged-in user when not supplied.
    if pcLeader == 0:
        user_leader = self.loginUser
    else:
        user_leader = self.user_svc.getUserById(pcLeader)
        if user_leader == None:
            self.printer.msg = u"该用户不存在。<br/><br/><a href='createPreCourse.py'>返回</a>"
            return self.printer.printError()
    # A grade (school stage) and a subject are both mandatory.
    if pcGradeId == None or pcGradeId == 0:
        self.printer.msg = u"你必须选择一个学段。"
        return self.printer.printError()
    if pcMetaSubjectId == None or pcMetaSubjectId == 0:
        self.printer.msg = u"你必须选择一个学科。"
        return self.printer.printError()
    pcStartDateTime = SimpleDateFormat("yyyy-MM-dd").parse(pcStartDate)
    pcEndDateTime = SimpleDateFormat("yyyy-MM-dd").parse(pcEndDate)
    self.prepareCourse.setTitle(pcTitle)
    self.prepareCourse.setStartDate(pcStartDateTime)
    self.prepareCourse.setEndDate(pcEndDateTime)
    self.prepareCourse.setDescription(pcDescription)
    self.prepareCourse.setMetaSubjectId(int(pcMetaSubjectId))
    self.prepareCourse.setGradeId(int(pcGradeId))
    self.prepareCourse.setCreateUserId(self.loginUser.userId)
    self.prepareCourse.setCreateDate(Date())
    self.prepareCourse.setLockedDate(Date())
    self.prepareCourse.setLeaderId(user_leader.getUserId())
    self.prepareCourse.setLockedUserId(0)
    self.prepareCourse.setPrepareCourseEditId(0)
    self.prepareCourse.setTags(pcTags)
    # prepareCourseId > 0 means an existing record: update; otherwise create.
    if self.prepareCourseId > 0:
        self.pc_svc.updatePrepareCourse(self.prepareCourse)
        self.printer.msg = u"您的 <span style='color:#f00'>" + pcTitle + u"</span> 修改成功。<br/><br/><a href='showPrepareCourse.py?prepareCourseId=" + str(self.prepareCourse.prepareCourseId) + "'>返回</a>"
        return self.printer.printError()
    else:
        self.pc_svc.createPrepareCourse(self.prepareCourse)
        self.printer.msg = u"您的 <span style='color:#f00'>" + pcTitle + u"</span> 创建成功。<br/><br/><a href='showPrepareCourse.py?prepareCourseId=" + str(self.prepareCourse.prepareCourseId) + "'>返回</a>"
        return self.printer.printError()
def _parseStartDate(self, date):
    r'@types: str -> java.util.Date or None'
    # Parse "HHmmss yyyyMMdd"; returns None (after logging) on failure.
    try:
        return SimpleDateFormat("HHmmss yyyyMMdd").parse(date)
    except:
        logger.warnException('Failed to convert start date: %s'
                             ' to HHmmss yyyyMMdd' % date)
def _parseDate(self, dateStr):
    """Try each supported timestamp pattern in turn; return the first
    successfully parsed java.util.Date, or None when all patterns fail."""
    patterns = ("MM/dd/yyyy HH:mm:ss", "MM.dd.yyyy HH:mm:ss",
                "yyyyMMddHHmmss")
    for pattern in patterns:
        try:
            logger.debug('Trying to parse date string %s with pattern %s'
                         % (dateStr, pattern))
            return SimpleDateFormat(pattern).parse(dateStr)
        except:
            logger.warn('Failed parsing date %s with date format %s'
                        % (dateStr, pattern))
def parse_time(time):
    r'@types: str->java.util.Date or None'
    # "yyyy-MM-dd HH:mm:ss" -> Date; logs and returns None on failure.
    try:
        from java.text import SimpleDateFormat
        return SimpleDateFormat("yyyy-MM-dd HH:mm:ss").parse(time)
    except:
        logger.debug('Failed to parse time: %s' % time)
def getVigentStretchesQuery(store, fecha):
    """Build a feature query selecting stretches whose validity window
    (fecha_entrada .. fecha_salida, open ends allowed) contains `fecha`."""
    #query = getCarreterasManager().getVigentStretchesQuery(store, fecha)
    day = SimpleDateFormat("dd/MM/yyyy").format(fecha)
    filtro = "( fecha_entrada <= '%s' OR fecha_entrada IS NULL) AND ('%s' <= fecha_salida OR fecha_salida IS NULL)" % (day, day)
    consulta = store.createFeatureQuery()
    consulta.addFilter(filtro)
    return consulta
def getFieldAsDate(field, pattern):
    """Coerce a field value to java.util.Date.

    Strings are parsed with the given SimpleDateFormat pattern, Dates pass
    through unchanged, anything else yields None.
    """
    if isinstance(field, Date):
        return field
    if isinstance(field, unicode):
        return SimpleDateFormat(pattern).parse(field)
    return None
def __init__(self, selectFields):
    """Query object for private prepare-course content searches; initialises
    filter fields and stamps the current time."""
    BaseQuery.__init__(self, selectFields)
    self.params = ParamUtil(request)
    self.prepareCourseId = None
    self.userId = None
    # Whether to require private content: None = show all, True = only
    # entries with content, False = only entries without content.
    self.privateContentExist = None
    # Stage of the prep session: running / finished / will (not yet started).
    self.stage = None
    sft = SimpleDateFormat("yyyy-MM-dd HH:mm:ss")
    self.nowDate = sft.format(Date())
def findTimestamp(self, dateTup, timeTup):
    """Parse "<date> <time>" (e.g. "01 Jan 2012 00:00:00") as GMT and return
    epoch seconds, or 0 when the parsed year is implausibly large."""
    str_date = dateTup + " " + timeTup + " GMT"
    formatter = SimpleDateFormat("dd MMM yyyy HH:mm:ss z")
    p_date = formatter.parse(str_date)
    # NOTE(review): java.util.Date.getYear() returns year-1900, so this
    # guard actually fires for calendar years above 4900 -- confirm intent.
    parsed_year = p_date.getYear()
    if parsed_year > 3000:
        print "parsed year is too large: " + str(parsed_year)
        return 0
    else:
        # Millis -> whole seconds.
        sc_tmstmp = Timestamp(p_date.getTime())
        return sc_tmstmp.getTime() / 1000
def applyWhereCondition(self, qctx):
    """Append AND-restrictions to the HQL query context from whichever
    filter fields are set on this query object (enabled flag, validity
    window, title, subject/grade, teacher name, and listType clauses)."""

    def escapeLike(value):
        # Escape SQL Server LIKE wildcards. BUG FIX: '[' must be escaped
        # FIRST -- the old %-then-_-then-[ order re-escaped the brackets it
        # had just inserted (e.g. '[%]' became '[[]%]').
        return value.replace("[", "[[]").replace("%", "[%]").replace("_", "[_]")

    # Single timestamp for all date comparisons in this call.
    nowDate = SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(
        Calendar.getInstance().getTime())

    if self.enabled != None:
        qctx.addAndWhere("ev.enabled = :enabled")
        qctx.setBoolean("enabled", self.enabled)
    if self.ValidPlan == True:
        # "Valid" = the current time falls inside the plan's window.
        # (Removed a duplicated setString("nowDate", ...) call.)
        qctx.addAndWhere("(:nowDate >= ev.startDate) And (:nowDate <= ev.endDate)")
        qctx.setString("nowDate", nowDate)
    if self.title != None:
        qctx.addAndWhere("ev.evaluationCaption LIKE :title")
        qctx.setString("title", "%" + escapeLike(self.title) + "%")
    if self.subjectId != None:
        qctx.addAndWhere("ev.metaSubjectId = :subjectId")
        qctx.setInteger("subjectId", self.subjectId)
    if self.gradeId != None:
        qctx.addAndWhere("ev.metaGradeId = :gradeId")
        qctx.setInteger("gradeId", self.gradeId)
    if self.teacherName != None:
        qctx.addAndWhere("ev.teacherName LIKE :teacherName")
        qctx.setString("teacherName", "%" + escapeLike(self.teacherName) + "%")
    if self.listType == 0:
        # Completed evaluations.
        qctx.addAndWhere(":nowDate > ev.endDate")
        qctx.setString("nowDate", nowDate)
        if self.userId > 0:
            qctx.addAndWhere("ev.teacherId = :userId")
            qctx.setInteger("userId", self.userId)
    elif self.listType == 1:
        # Evaluations currently in progress.
        qctx.addAndWhere("(:nowDate >= ev.startDate) And (:nowDate <= ev.endDate)")
        qctx.setString("nowDate", nowDate)
        if self.userId > 0:
            qctx.addAndWhere("ev.teacherId = :userId")
            qctx.setInteger("userId", self.userId)
    elif self.listType == 2:
        # Evaluations I initiated.
        qctx.addAndWhere("ev.createrId=:userId")
        qctx.setInteger("userId", self.userId)
    elif self.listType == 3:
        # Evaluations I took part in (re-joined the HQL that had been
        # split across a broken line).
        qctx.addAndWhere(" ev.evaluationPlanId IN(SELECT ec.evaluationPlanId FROM EvaluationContent as ec WHERE ec.publishUserId=:userId)")
        qctx.setInteger("userId", self.userId)
def findTimestamp (self, dateTup, timeTup):
    """Parse "<date> <time>" (e.g. "01 Jan 2012 00:00:00") as GMT and return
    epoch seconds, or 0 when the parsed year is implausibly large."""
    str_date = dateTup + " " + timeTup + " GMT"
    formatter = SimpleDateFormat("dd MMM yyyy HH:mm:ss z")
    p_date = formatter.parse(str_date)
    # NOTE(review): java.util.Date.getYear() returns year-1900, so this
    # guard actually fires for calendar years above 4900 -- confirm intent.
    parsed_year = p_date.getYear()
    if parsed_year > 3000:
        print "parsed year is too large: " + str(parsed_year)
        return 0
    else:
        # Millis -> whole seconds.
        sc_tmstmp = Timestamp (p_date.getTime())
        return sc_tmstmp.getTime() / 1000
def _parseDate(self, dateStr):
    """Attempt each known timestamp layout; the first that parses wins.
    Returns None when every pattern fails (each failure is logged)."""
    candidates = ["MM/dd/yyyy HH:mm:ss", "MM.dd.yyyy HH:mm:ss",
                  "yyyyMMddHHmmss"]
    for fmt in candidates:
        try:
            logger.debug('Trying to parse date string %s with pattern %s'
                         % (dateStr, fmt))
            parsed = SimpleDateFormat(fmt).parse(dateStr)
            return parsed
        except:
            logger.warn('Failed parsing date %s with date format %s'
                        % (dateStr, fmt))
def createSimpleTask(phaseId, taskTypeValue, title, propertyMap):
    """Create a task of the requested type in a phase.

    Properties whose name contains "date" are parsed with the
    "yyyy-MM-dd hh:mm:ss" pattern (NOTE(review): lowercase hh is the
    12-hour field -- confirm HH was not intended); empty date values are
    skipped and all other properties are copied verbatim.
    """
    descriptor = Type.valueOf(taskTypeValue).descriptor
    task = descriptor.newInstance("nonamerequired")
    task.setTitle(title)
    parser = SimpleDateFormat("yyyy-MM-dd hh:mm:ss")
    for name in propertyMap:
        value = propertyMap[name]
        if name.lower().find("date") == -1:
            task.setProperty(name, value)
        elif value is not None and len(value) != 0:
            task.setProperty(name, parser.parse(value))
    taskApi.addTask(phaseId, task)
def getQuery(self):
    """Build the JPA query selecting default-period document positions that
    touch this ZPK (debit or credit side) within the [_from, _to] window,
    ordered by creation time."""
    sql = "Select dp From DocumentPosition dp Where (dp.debitZpk.id = :did or dp.creditZpk.id = :cid) and dp.bookingPeriod.defaultPeriod = 1 and dp.createdAt >= :from and dp.createdAt <= :to Order By dp.createdAt ASC"
    fmt = SimpleDateFormat('dd-MM-yyyy')
    zpkId = self._zpk.getId()
    query = self._entityManager.createQuery(sql)
    query.setParameter("did", zpkId)
    query.setParameter("cid", zpkId)
    query.setParameter("from", fmt.parse(self._from), TemporalType.DATE)
    query.setParameter("to", fmt.parse(self._to), TemporalType.DATE)
    return query
def createSimpleTask(phaseId, taskTypeValue, title, propertyMap):
    """Instantiate a task of the named type, copy properties onto it
    (parsing any *date* property from "yyyy-MM-dd hh:mm:ss"), and add it
    to the given phase via taskApi."""
    task_type = Type.valueOf(taskTypeValue)
    new_task = task_type.descriptor.newInstance("nonamerequired")
    new_task.setTitle(title)
    date_parser = SimpleDateFormat("yyyy-MM-dd hh:mm:ss")
    for prop_name in propertyMap:
        prop_value = propertyMap[prop_name]
        if "date" in prop_name.lower():
            # Date-like properties are parsed; empty/None ones are skipped.
            if prop_value is not None and len(prop_value) != 0:
                new_task.setProperty(prop_name, date_parser.parse(prop_value))
        else:
            new_task.setProperty(prop_name, prop_value)
    taskApi.addTask(phaseId, new_task)
def editPrepareCourseStage(self):
    """Update an existing prepare-course stage from request parameters,
    validating presence of title/dates and date ordering; writes the
    result message straight to the HTTP response."""
    prepareCourseStageId = self.params.getIntParam("prepareCourseStageId")
    if prepareCourseStageId > 0:
        prepareCourseStage = self.pc_svc.getPrepareCourseStage(
            prepareCourseStageId)
        if prepareCourseStage == None:
            errDesc = u"加载备课流程失败。"
            response.getWriter().write(errDesc)
            return
    else:
        errDesc = u"无效的标识。"
        response.getWriter().write(errDesc)
        return
    stageTitle = self.params.safeGetStringParam("stageTitle")
    stageStartDate = self.params.safeGetStringParam("stageStartDate")
    stageEndDate = self.params.safeGetStringParam("stageEndDate")
    stageDescription = self.params.getStringParam("stageDescription")
    stageOrderIndex = self.params.getIntParam("stageOrderIndex")
    # Title and both dates are mandatory.
    if stageTitle == None or stageTitle == "":
        errDesc = u"请输入一个流程名。<a href='#' onclick='window.history.back();return false;'>返回</a>"
        response.getWriter().write(errDesc)
        return
    if stageStartDate == None or stageStartDate == "":
        errDesc = u"请输入一个开始日期。<a href='#' onclick='window.history.back();return false;'>返回</a>"
        response.getWriter().write(errDesc)
        return
    if stageEndDate == None or stageEndDate == "":
        errDesc = u"请输入一个结束日期。<a href='#' onclick='window.history.back();return false;'>返回</a>"
        response.getWriter().write(errDesc)
        return
    # Normalise full-width dashes (IME input) before parsing.
    stageStartDate = stageStartDate.replace("—", "-")
    stageEndDate = stageEndDate.replace("—", "-")
    pcStageStartDate = SimpleDateFormat("yyyy-MM-dd").parse(stageStartDate)
    pcEndDate = SimpleDateFormat("yyyy-MM-dd").parse(stageEndDate)
    # End must not precede start.
    if DateUtil.compareDateTime(pcStageStartDate, pcEndDate) > 0:
        errDesc = u"结束时间不能早于开始时间。<a href='#' onclick='window.history.back();return false;'>返回</a>"
        response.getWriter().write(errDesc)
        return
    prepareCourseStage.setTitle(stageTitle)
    prepareCourseStage.setBeginDate(pcStageStartDate)
    prepareCourseStage.setFinishDate(pcEndDate)
    prepareCourseStage.setDescription(stageDescription)
    prepareCourseStage.setOrderIndex(stageOrderIndex)
    self.pc_svc.updatePrepareCourseStage(prepareCourseStage)
    errDesc = u"修改 " + prepareCourseStage.title + u" 流程成功。"
    response.getWriter().write(errDesc)
    return
def __init__(self, threadName, workerConfig):
    """Two-mode constructor for the ES logging worker.

    Called with threadName None it is the controller instance: it keeps a
    reference to the work queue and spawns the background logger thread.
    Called with a thread name it is the worker instance: it opens the ES
    client, prepares bulk-request state and the target index name.
    """
    WorkerThread.__init__(self, threadName, workerConfig)
    self.logger = Logger.getLogger("ElasticSearch.EsLogger")
    # self.logger.setLevel(Level.DEBUG)
    self.workerConfig = workerConfig
    # Default Bulk Request Settings for ES Logging.
    # - Set to True to use bulk requests for logs.
    self.useBulkReq = False
    self.bulkReqCounter = 0
    self.bulkReqExecCountTrigger = 1000
    self.lastBulkReqFlush = datetime.now()
    if (threadName is None):
        # ==== 1st Instance (threadName is None) ====
        # Get the EsLogger queue.
        # This object will feed the queue through this reference.
        self.wq = workerConfig.wq
        self.esNode = self.workerConfig.esNode
    else:
        # ==== 2nd Instance (threadName is not None) ====
        self.esNode = self.workerConfig.esNode
        self.esClient = self.esNode.getClient()
        self.esBulkReq = EsBulkReq(self.esClient, None)
        self.indexName = workerConfig.indexName
        # If bulkReq config are set in the workerConfig object, use them.
        if workerConfig.useBulkReq is not None:
            self.useBulkReq = workerConfig.useBulkReq
        if workerConfig.bulkReqExecCountTrigger is not None:
            self.bulkReqExecCountTrigger = workerConfig.bulkReqExecCountTrigger
    # Json SerDe objects
    self.boon = BoonJson()
    self.esLoggerWorker = None
    self.esLoggerThread = None
    self.stopThread = False
    self.threaded = False
    # Timestamp format used for log documents.
    self.dtfmt = SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSZ")
    if threadName is None:
        self.threadName = "EsLoggerController"
        # Startup the Background thread.
        self.startEsLoggerThread()
    else:
        self.threadName = threadName
def setupjob(job, args):
    """
    Set up a job to run on a date range of directories.

    Jobs expect two arguments, startdate and enddate, both in yyyy-MM-dd
    format; args[0] is the testpilot study name. Relies on module-level
    globals `dateformat` and `pathformat`.
    """
    import java.text.SimpleDateFormat as SimpleDateFormat
    import java.util.Date as Date
    import java.util.Calendar as Calendar
    import com.mozilla.util.DateUtil as DateUtil
    import com.mozilla.util.DateIterator as DateIterator
    import org.apache.hadoop.mapreduce.lib.input.FileInputFormat as FileInputFormat
    import org.apache.hadoop.mapreduce.lib.input.SequenceFileAsTextInputFormat as MyInputFormat

    if len(args) != 3:
        raise Exception(
            "Usage: <testpilot_study> <startdate-YYYY-MM-DD> <enddate-YYYY-MM-DD>"
        )

    # use to collect up each date in the given range
    class MyDateIterator(DateIterator):
        def __init__(self):
            self._list = []

        def get(self):
            return self._list

        def see(self, aTime):
            self._list.append(aTime)

    sdf = SimpleDateFormat(dateformat)
    study = args[0]
    startdate = Calendar.getInstance()
    startdate.setTime(sdf.parse(args[1]))
    enddate = Calendar.getInstance()
    enddate.setTime(sdf.parse(args[2]))
    dates = MyDateIterator()
    # Visits every day between start and end, feeding dates.see().
    DateUtil.iterateByDay(startdate.getTimeInMillis(),
                          enddate.getTimeInMillis(), dates)
    paths = []
    for d in dates.get():
        # One input directory per day: pathformat % (study, formatted-day).
        paths.append(pathformat % (study, sdf.format(Date(d))))
    job.setInputFormatClass(MyInputFormat)
    FileInputFormat.setInputPaths(job, ",".join(paths))
    job.getConfiguration().set("org.mozilla.jydoop.mappertype", "TEXT")
def tail_logfile(log_file, from_time): from java.text import SimpleDateFormat import re pattern = re.compile("\[(.*)\]") formatter = SimpleDateFormat("dd/MMM/yy:H:m:s Z") for line in log_file[1].split('\n'): mymatch = pattern.match(line) if mymatch: timestamp = mymatch.group(1) timestamp_object = formatter.parse(timestamp) if date_compare(from_time, timestamp_object) == 'Less': print line
def tail_logfile(log_file,from_time):
    """Print every line of the log whose bracketed "[...]" timestamp
    compares as 'Less' against from_time via the script-level
    date_compare helper.

    log_file is a pair; only its second element (the contents) is read.
    """
    from java.text import SimpleDateFormat
    import re
    pattern=re.compile("\[(.*)\]")
    # Access-log style timestamp, e.g. 21/Jan/15:9:3:7 +0000
    formatter = SimpleDateFormat("dd/MMM/yy:H:m:s Z")
    for line in log_file[1].split('\n'):
        mymatch = pattern.match(line)
        if mymatch:
            timestamp=mymatch.group(1)
            timestamp_object = formatter.parse(timestamp)
            if date_compare(from_time,timestamp_object) == 'Less':
                print line
def __init__(self, selectFields):
    """Query object initialiser: resets the filter fields, reads the
    keyword/filter request parameters, and stamps the current time for
    date comparisons."""
    BaseQuery.__init__(self, selectFields)
    self.params = ParamUtil(request)
    self.orderType = 0
    self.createUserId = None
    self.ownerType = None
    self.ownerId = None
    self.status = None
    self.qryDate = None
    self.k = self.params.getStringParam("k")  # search keyword
    self.filter = self.params.getStringParam("filter")
    sft = SimpleDateFormat("yyyy-MM-dd HH:mm:ss")
    self.nowDate = sft.format(Date())
def actionPerformed(self, actionEvent):
    """Save the current SCL cavity/quad settings to a date-stamped .xdxf
    accelerator file chosen via a JFileChooser dialog."""
    self.scl_long_tuneup_controller.getMessageTextField().setText("")
    rightNow = Calendar.getInstance()
    date_format = SimpleDateFormat("MM.dd.yyyy")
    time_str = date_format.format(rightNow.getTime())
    fc = JFileChooser(constants_lib.const_path_dict["XAL_XML_ACC_FILES_DIRS_PATH"])
    fc.setDialogTitle("Save SCL data into the SCL_new.xdxf file")
    fc.setApproveButtonText("Save")
    fl_filter = FileNameExtensionFilter("SCL Acc File", ["xdxf", ])
    fc.setFileFilter(fl_filter)
    fc.setSelectedFile(File("SCL_" + time_str + ".xdxf"))
    returnVal = fc.showOpenDialog(self.scl_long_tuneup_controller.linac_wizard_document.linac_wizard_window.frame)
    if (returnVal == JFileChooser.APPROVE_OPTION):
        fl_out = fc.getSelectedFile()
        fl_path = fl_out.getPath()
        # Append the .xdxf extension when the user omitted it.
        if (fl_path.rfind(".xdxf") != (len(fl_path) - 5)):
            fl_out = File(fl_out.getPath() + ".xdxf")
        # ---------prepare the XmlDataAdaptor
        root_DA = XmlDataAdaptor.newEmptyDocumentAdaptor()
        scl_DA = root_DA.createChild("xdxf")
        scl_DA.setValue("date", time_str)
        scl_DA.setValue("system", "sns")
        scl_DA.setValue("version", "2.0")
        # ---- SCLMed
        seq_name_arr = ["SCLMed", "SCLHigh", "HEBT1"]
        for seq_name in seq_name_arr:
            accl = self.scl_long_tuneup_controller.linac_wizard_document.accl
            seq = accl.findSequence(seq_name)
            # NOTE(review): `.and(` / `.or(` are Python keywords and cannot
            # be method names in valid Jython source -- these two lines look
            # mangled in transit; confirm against the original script.
            cavs = seq.getAllNodesWithQualifier(AndTypeQualifier().and((OrTypeQualifier()).or(SCLCavity.s_strType)))
            quads = seq.getAllNodesWithQualifier(AndTypeQualifier().and((OrTypeQualifier()).or(Quadrupole.s_strType)))
            scl_seq_DA = scl_DA.createChild("sequence")
            scl_seq_DA.setValue("id", seq.getId())
            # One <node> child per quad carrying its default field value.
            for quad in quads:
                node_DA = scl_seq_DA.createChild("node")
                node_DA.setValue("id", quad.getId())
                attr_DA = node_DA.createChild("attributes")
                field_DA = attr_DA.createChild("magnet")
                scl_quad_fields_dict_holder = self.scl_long_tuneup_controller.scl_long_tuneup_init_controller.scl_quad_fields_dict_holder
                field_DA.setValue("dfltMagFld", str(scl_quad_fields_dict_holder.quad_field_dict[quad]))
            # One <sequence> child per cavity carrying design amp/phase.
            for cav in cavs:
                node_DA = scl_seq_DA.createChild("sequence")
                node_DA.setValue("id", cav.getId())
                attr_DA = node_DA.createChild("attributes")
                rf_cav_DA = attr_DA.createChild("rfcavity")
                cav_wrappper = self.scl_long_tuneup_controller.getCav_WrapperForCavId(cav.getId())
                (amp, phase) = (cav_wrappper.designAmp, cav_wrappper.designPhase)
                rf_cav_DA.setValue("amp", float("%8.5f" % amp))
                rf_cav_DA.setValue("phase", float("%8.3f" % phase))
        root_DA.writeTo(fl_out)
def main(*args): layer = gvsig.currentLayer() store = layer.getFeatureStore() fset = store.getFeatureSet() for feature in fset: field = feature.get("HORA") pattern = "HH:mm:ss" #field = "23:10:10" formatter = SimpleDateFormat(pattern) newDate = formatter.parse(field) cal = Calendar.getInstance() cal.setTime(newDate) hour = cal.get(Calendar.HOUR_OF_DAY) print hour
def dateformat(t, format, language=None):
    """
    Format python date to string using Java SimpleDateFormat.

    :param t: Python date.
    :param format: SimpleDateFormat pattern string.
    :param language: optional locale language code; default locale if None.
    :returns: Format string of the date
    """
    converted = jdate(t)
    if language is None:
        formatter = SimpleDateFormat(format)
    else:
        formatter = SimpleDateFormat(format, Locale(language))
    return formatter.format(converted)
def setupjob(job, args): """ Set up a job to run on a date range of directories. Jobs expect two arguments, startdate and enddate, both in yyyy-MM-dd format. """ import java.text.SimpleDateFormat as SimpleDateFormat import java.util.Date as Date import java.util.Calendar as Calendar import com.mozilla.util.DateUtil as DateUtil import com.mozilla.util.DateIterator as DateIterator import org.apache.hadoop.mapreduce.lib.input.FileInputFormat as FileInputFormat import org.apache.hadoop.mapreduce.lib.input.SequenceFileAsTextInputFormat as MyInputFormat if len(args) != 3: raise Exception("Usage: <testpilot_study> <startdate-YYYY-MM-DD> <enddate-YYYY-MM-DD>") # use to collect up each date in the given range class MyDateIterator(DateIterator): def __init__(self): self._list = [] def get(self): return self._list def see(self, aTime): self._list.append(aTime) sdf = SimpleDateFormat(dateformat) study = args[0] startdate = Calendar.getInstance() startdate.setTime(sdf.parse(args[1])) enddate = Calendar.getInstance() enddate.setTime(sdf.parse(args[2])) dates = MyDateIterator() DateUtil.iterateByDay(startdate.getTimeInMillis(), enddate.getTimeInMillis(), dates) paths = [] for d in dates.get(): paths.append(pathformat % (study, sdf.format(Date(d)))) job.setInputFormatClass(MyInputFormat) FileInputFormat.setInputPaths(job, ",".join(paths)); job.getConfiguration().set("org.mozilla.jydoop.mappertype", "TEXT")
def _parseDateString(installDateString): installationDateAsDate = None if installDateString: for format in ['yyyyMMdd', 'yyyyMMddHHmmss.SSSSSS-000', 'EEE dd MMM yyyy HH:mm:ss aa zzz']: if len(installDateString) == len(format): try: from java.text import SimpleDateFormat from java.util import TimeZone dateFormatter = SimpleDateFormat(format) dateFormatter.setTimeZone(TimeZone.getTimeZone("GMT")) installationDateAsDate = dateFormatter.parse(installDateString) except java.text.ParseException: # could not parse date # print 'could not parse' + installDateString + ' as ' + format pass return installationDateAsDate
def __init__(self, imp): '''Get the metadata from the given dm3 image. ''' extractor = GatanMetadataExtractor(imp) self.exposure = extractor.getExposure() self.magnification = extractor.getMagnification() self.mag_factor = extractor.getActualMagnification() / self.magnification self.mag_unit = 'x' if not Double.isNaN(extractor.getEnergyloss()): self.energyloss = extractor.getEnergyloss() else: self.energyloss = 0 self.date = extractor.getDateAndTime() date_formater = SimpleDateFormat('yyyyMMdd') self.date_string = date_formater.format(self.date) self.name = extractor.getName() self.prop_dict = {}
def savePreviousArguments(managedServerName): from java.io import File from java.io import FileOutputStream from java.util import Properties from java.util import Date from java.text import SimpleDateFormat import string startToEdit() # parameter on the wsdl ant task call fileLocation = sys.argv[1].replace("\\","/") print "The backup file location is" print fileLocation try: dateFormat = SimpleDateFormat('_d_MMM_yyyy_HH_mm_ss') date = Date() formattedDate = dateFormat.format(date) print formattedDate except: print "The date cannot be created/formatted" try: propsFile = File(fileLocation+ managedServerName + formattedDate+"_config.bkp"); print propsFile.exists() if(propsFile.exists() == 0): propsFile.createNewFile() except: print "The file cannot be created on:" print propsFile.getAbsoluteFile() dumpStack() previousProperties = Properties() print '===> Saving the previous arguments - ' + managedServerName cd('/Servers/'+managedServerName) print "Getting the VMArgs" vmArgs = cmo.getServerStart().getArguments() print vmArgs if vmArgs == None: vmArgs = "" previousProperties.setProperty("vmArgs", vmArgs) print "Saving Arguments to file" previousProperties.store(FileOutputStream(propsFile),None) print '===> Saved arguments! Please verify the file on:'+ fileLocation + "in" + managedServerName
def _parseSoftwareComponent(self, softwareComponentNode): name = self._evalToString('NAME/@value', softwareComponentNode) vendor = self._evalToString('VENDOR/@value', softwareComponentNode) release = self._evalToString('RELEASE/@value', softwareComponentNode) serviceLevel = self._evalToString('SERVICELEVEL/@value', softwareComponentNode) if serviceLevel.isdigit(): serviceLevel = int(serviceLevel) patchLevel = self._evalToString('PATCHLEVEL/@value', softwareComponentNode) if patchLevel.isdigit(): patchLevel = int(patchLevel) counter = self._evalToString('COUNTER/@value', softwareComponentNode) provider = self._evalToString('PROVIDER/@value', softwareComponentNode) location = self._evalToString('LOCATION/@value', softwareComponentNode) applied = self._evalToString('APPLIED/@value', softwareComponentNode) try: dateFormat = SimpleDateFormat('yyyyMMDDHHmmss') applied = dateFormat.parse(applied) except: logger.warnException('Failed to parse applied date') applied = None return self.SoftwareComponent(name, vendor, release, serviceLevel, patchLevel, counter, provider, location, applied)
def parseSuccess(self, output): """ @types: str -> [DbSchemaResult] or [] """ result = [] format_ = SimpleDateFormat("yyyy-MM-dd HH:mm:ss Z") for line in self.stripHeader(output): line = line.strip() if line: line = line.strip("|") schemaInfo = re.split("\s*\|\s*", line) if len(schemaInfo) == 5: dateStr = "%s %s %s" % (schemaInfo[2], schemaInfo[3], self.__getTimeZone(schemaInfo[4])) date = None try: date = format_.parse(dateStr) except ParseException, ex: logger.debugException(ex.getMessage()) logger.warn('Cannot parse date "%s": %s' % (dateStr, ex.getMessage())) result.append(DbSchemaResult(schemaInfo[1].strip(), schemaInfo[0].strip(), date))
def isHung(self): sdi = SimpleDateFormat('d MMM yyyy HH:mm:ss,SSS') today = datetime.datetime.today() # no need to check if too close to midnight -- don't bother handle wrapping if today.hour <= 1: log('INFO: hang check deferred until after 1AM') return False # formulate today's log file name lf = '/var/log/ems/info.%04d%02d%02d' % (today.year,today.month,today.day) + '.log' # get the last logged message fd = os.popen('tac %s | fgrep -m 1 purge-client' % lf, 'r') line = fd.readline() fd.close() # nothing logged if line is None: log('WARN: when checking for hang the log file line was empty') return False line = line.strip() # nothing logged if len(line) <= 0: log('WARN: when checking for hang the log file was empty') return False d = sdi.parse(line,ParsePosition(0)) td = Date().getTime() - d.getTime() td = td / 1000 td = td / 60 # 60 minutes is too long, report hang return td > self.hangTimeout
def getPendingUpdates(self, oid): storage = self.Services.getStorage() object = storage.getObject(oid) indexFile = File(object.getPath() + "/parked_Version_Index.json") self.pendingUpdates = [] self.allUpdates = [] if indexFile.exists(): dateFormatter = SimpleDateFormat("yyyyMMddHHmmss") modifiedDate = dateFormatter.parse(object.getMetadata().getProperty("last_modified")) parkedVersions = JsonSimple(indexFile).getJsonArray() for version in parkedVersions: ts = version.get("timestamp") versionDate = dateFormatter.parse(ts) self.allUpdates.append(ts) if versionDate.after(modifiedDate): self.pendingUpdates.append( ts) object.close() self.pendingUpdateSize = len(self.pendingUpdates) self.allUpdateSize = len(self.allUpdates) return self.pendingUpdates
def getContacts(self, userAccounts): self.contactsLoadProgress = 0 contactAccountsTemp = {} nodes = self.xmlRoot.firstChild.childNodes for i in range(len(nodes)): # check if this action was aborted if self.isAborted(): return None node = nodes[i] if not isinstance(node, minidom.Element): continue df = SimpleDateFormat("yyyy-MM-dd HH:mm:ss") date = df.parse(node.getElementsByTagName('timestamp')[0].firstChild.toxml()) name = node.getElementsByTagName('name') if len(name) == 0: name = "" else: name = name[0].firstChild.toxml() uid = node.getElementsByTagName('from')[0].firstChild.toxml() ca = ContactAccount(0, name, uid, "", None, self.protocol) if not contactAccountsTemp.has_key(ca): contactAccountsTemp[ca] = [] content = node.getElementsByTagName('body')[0].firstChild.toxml() msg = Message(0, None, content, date, True) contactAccountsTemp[ca].append(msg) self.messagesCount += 1 self.contactsLoadProgress = i * 100 /len(nodes) contacts = [] for ca in contactAccountsTemp.iterkeys(): ca.conversations = ConversationHelper.messagesToConversations(contactAccountsTemp[ca], ca, userAccounts[0]) cnt = Contact(0, "", "", ca.name) cnt.addContactAccount(ca) contacts.append(cnt) self.contactsLoadProgress = 100 return contacts
def getLastBootDate(self, bootDateFormat): logger.debug("Discovering last boot date via net stats") output = self.shell.execCmd('net stats srv')#@@CMD_PERMISION ntcmd protocol execution if output and self.shell.getLastCmdReturnCode() == 0: lines = output.split('\n') # get rid of empty lines: lines = [line.strip() for line in lines if line.strip()] # Second line contains 'Statistics since <date>' where date can be in 12 or 24 format dateLine = lines[1] bootDateStr = None matcher = re.search(r"\d{1,4}([./-])\d{1,4}\1\d{1,4}", dateLine) if matcher: bootDateStr = matcher.group() bootTimeStr = None bootTimeFormat = None matcher = re.search(r"\d{1,2}:\d{2}( (a|p)m)?", dateLine, re.I) if matcher: bootTimeStr = matcher.group() ampm = matcher.group(1) if ampm: bootTimeFormat = "h:mm a" else: bootTimeFormat = "H:mm" if bootDateStr and bootDateFormat and bootTimeStr: resultDateStr = "%s %s" % (bootDateStr, bootTimeStr) resultDateFormat = "%s %s" % (bootDateFormat, bootTimeFormat) try: formatter = SimpleDateFormat(resultDateFormat) result = formatter.parse(resultDateStr) logger.debug('Date = %s' % result) return result except: logger.warn("Error parsing date string '%s' with format '%s'" % (resultDateStr, resultDateFormat)) return None raise ValueError, 'Failed getting data from net stats srv.'
def setupjob(job, args): """ Set up a job to run on crash-stats date ranges. Expects three arguments: startdate (yymmdd) enddate (yymmdd) """ import java.text.SimpleDateFormat as SimpleDateFormat import java.util.Calendar as Calendar import com.mozilla.hadoop.hbase.mapreduce.MultiScanTableMapReduceUtil as MSTMRU from com.mozilla.util import Pair if len(args) != 2: raise Exception("Usage: <startdate-yymmdd> <enddate-yymmdd>") startarg, endarg = args sdf = SimpleDateFormat(dateformat) startdate = Calendar.getInstance() startdate.setTime(sdf.parse(startarg)) enddate = Calendar.getInstance() enddate.setTime(sdf.parse(endarg)) columns = [Pair(family, qualifier) for family, qualifier in columnlist] scans = MSTMRU.generateHexPrefixScans(startdate, enddate, dateformat, columns, 500, False) MSTMRU.initMultiScanTableMapperJob( 'crash_reports', scans, None, None, None, job) # inform HadoopDriver about the columns we expect to receive job.getConfiguration().set("org.mozilla.jydoop.hbasecolumns", ','.join(':'.join(column) for column in columnlist))
def flowFileDates(hmsRunName): #will not be accurate if there is more than one set of data--specify start date in config file? """Find the dates of any FLOW files in the DSS catalog""" from java.text import SimpleDateFormat dateFormat = SimpleDateFormat("ddMMMyyyy") print("Getting dates from " + hmsRunName + "...") dates = [] #print(dss.getCatalogedPathnames()) flowFiles = filter(lambda f:((f.split('/')[3] == 'FLOW') and (f.split('/')[6] == ('RUN:'+hmsRunName.upper()))), dss.getCatalogedPathnames()) #print(flowFiles) candidateDates = map(lambda x:x.split('/')[4], flowFiles) #print(candidateDates) for d in candidateDates: if d[0:2].isdigit() and d[2:5].isalpha() and d[5:9].isdigit(): date = dateFormat.parse(d) dateAlreadyFound = any(lambda x:x.equals(date), dates) if not dateAlreadyFound: dates.append(date) dates.sort(lambda a,b:a.compareTo(b)) return map(lambda d: dateFormat.format(d).upper(), dates)
def getSqlDateInGMT(date): """ @type: long->str @param: date - timestamp """ dateObj = Date(date) df = SimpleDateFormat() df.setTimeZone(TimeZone.getTimeZone("GMT")) # Default Java Date.toString() format df.applyPattern('EEE MMM dd HH:mm:ss zzz yyyy') return df.format(dateObj)
def __activate__(self, context): self.auth = context["page"].authentication self.response = context["response"] self.request = context["request"] self.dateFormatter = SimpleDateFormat("yyyy-MM-dd") self.errorMsg = "" if (self.auth.is_logged_in()): if (self.auth.is_admin()==True): self.chartName = context["formData"].get("chartName") self.buildBarChart(context) else: self.errorMsg = "Requires Admin / Librarian / Reviewer access." else: self.errorMsg = "Please login." if (self.errorMsg!=""): self.response.setStatus(404) writer = self.response.getPrintWriter("text/plain; charset=UTF-8") writer.println(self.errorMsg) writer.close()
def getDate(self): calendar = GregorianCalendar() date = calendar.getTime() dayFormat = SimpleDateFormat("dd") monthFormat = SimpleDateFormat("MM") yearFormat = SimpleDateFormat("yyyy") DAY = int(dayFormat.format(date)) MONTH = int(monthFormat.format(date)) YEAR = int(yearFormat.format(date)) if MONTH < 10: TEMP1 = "%d0%d" % (YEAR, MONTH) else: TEMP1 = "%d%d" % (YEAR, MONTH) if DAY < 10: CURRENTDATE = "%d0%d" % (TEMP1, DAY) else: CURRENTDATE = "%d%d" % (TEMP1, DAY) return CURRENTDATE
def processObjects(allObjects, DateParsePattern):
    """
    Convert parsed XML object elements into an ObjectStateHolderVector.

    :param allObjects: iterable of XML 'object' elements with 'field' children
    :param DateParsePattern: SimpleDateFormat pattern for 'date' typed
        attributes; when empty/None the raw string is stored instead
    :return: (vector, ciDict) -- the OSH vector and a mamId -> OSH map
    """
    vector = ObjectStateHolderVector()
    iter = allObjects.iterator()
    # ciList entries: [mamId, cit, attributes]
    # attributes entries: [name, type, key, value]
    ciList = []
    ciDict = {}
    createCi = 1
    while iter.hasNext():
        attributes = []
        objectElement = iter.next()
        mamId = objectElement.getAttribute('mamId').getValue()
        cit = objectElement.getAttribute('name').getValue()
        if mamId != None and cit != None:
            # add the attributes...
            allAttributes = objectElement.getChildren('field')
            iterAtt = allAttributes.iterator()
            while iterAtt.hasNext():
                attElement = iterAtt.next()
                attName = attElement.getAttribute('name').getValue()
                attType = attElement.getAttribute('datatype').getValue()
                attKey = attElement.getAttribute('key')
                attValue = attElement.getText()
                if attType == None or attType == "":
                    attType = "string"
                if attKey == None or attKey == "":
                    attKey = "false"
                else:
                    attKey = attKey.getValue()
                if attName != "" and attType != "":
                    attributes.append([attName, attType, attKey, attValue])
                # create CI or not? Is key empty or none?
                if attKey == "true":
                    if attValue != None and attValue != "":
                        createCi = 1
                    else:
                        createCi = 0
            #info (concatenate("Id: ", mamId, ", Type: ", cit, ", Properties: ", attributes))
            if createCi == 1:
                ciList.append([mamId, cit, attributes])
                #ciDict[mamId] = [mamId, cit, attributes]
    for ciVal in ciList:
        logger.info("\tAdding %s [%s] => [%s]" % (ciVal[1], ciVal[0], ciVal[2]) )
        id = ciVal[0]
        type = ciVal[1]
        osh = ObjectStateHolder(type)
        if ciVal[2] != None:
            props = ciVal[2]
            createContainer = 0
            containerOsh = None
            for prop in props:
                if prop[0] == 'root_container' and prop[3] != "" and ciDict.has_key(prop[3]):
                    containerOsh = ciDict[prop[3]]
                    createContainer = 1
                if prop[1] == 'integer':
                    prop[3] and prop[3].isdigit() and osh.setIntegerAttribute(prop[0], prop[3])
                elif prop[1] == 'long':
                    prop[3] and prop[3].isdigit() and osh.setLongAttribute(prop[0], prop[3])
                elif prop[1] == 'enum':
                    osh.setEnumAttribute(prop[0], int(prop[3]))
                elif prop[1] == 'boolean':
                    # BUGFIX: original compared the bound method str.lower
                    # (missing call parentheses) against 'false', which is
                    # never equal, so every boolean attribute was set to 1.
                    if str(prop[3]).lower() == 'false':
                        osh.setBoolAttribute(prop[0], 0)
                    else:
                        osh.setBoolAttribute(prop[0], 1)
                elif prop[1] == 'date':
                    if DateParsePattern != None and DateParsePattern != "":
                        formatter = SimpleDateFormat(DateParsePattern)
                        osh.setDateAttribute(prop[0], formatter.parseObject(prop[3]))
                else:
                    osh.setAttribute(prop[0], prop[3])
            if createContainer == 1:
                osh.setContainer(containerOsh)
        vector.add(osh)
        ciDict[id] = osh
    return (vector, ciDict)
def evaluate(self, times, max_time, origins_csv, destinations_csv, csv_writer, split=500, do_merge=False):
    '''
    evaluate the shortest paths between origins and destinations
    uses the routing options set in setup() (run it first!)

    Parameters
    ----------
    times: list of date times, the desired start/arrival times for evaluation
    origins_csv: file with origin points
    destinations_csv: file with destination points
    csv_writer: CSVWriter, configured writer to write results
    do_merge: merge the results over time, only keeping the best connections
    max_time: maximum travel-time in seconds (the smaller this value, the
        smaller the shortest path tree, that has to be created; saves
        processing time)
    '''
    origins = self.otp.loadCSVPopulation(origins_csv, LATITUDE_COLUMN, LONGITUDE_COLUMN)
    destinations = self.otp.loadCSVPopulation(destinations_csv, LATITUDE_COLUMN, LONGITUDE_COLUMN)
    # when routing "arrive by", the tree is rooted at the destinations,
    # so those are the points that get split into slices
    sources = origins if not self.arrive_by else destinations
    n_slices = (sources.size() / split) + 1
    if n_slices > 1:
        print 'Splitting sources into {} part(s) with {} points each part'.format(n_slices, split)
    from_index = 0; to_index = 0; i = 1
    # process the sources slice by slice to bound memory usage
    while True:
        if to_index >= sources.size():
            break
        from_index = to_index
        to_index += split
        if to_index >= sources.size():
            to_index = sources.size()
        sliced_sources = sources.get_slice(from_index, to_index)
        if n_slices > 1:
            print('calculating part {}/{}'.format(i, n_slices))
            i += 1
        if not self.arrive_by:
            origins = sliced_sources
        else:
            destinations = sliced_sources
        self.request.setOrigins(origins)
        self.request.setDestinations(destinations)
        self.request.setLogProgress(self.print_every_n_lines)
        if self.arrive_by:
            time_note = ' arrival time '
        else:
            time_note = 'start time '
        # # if evaluation is performed in a time window, routes exceeding the window will be ignored
        # # (worstTime already takes care of this, but the time needed to reach the snapped the OSM point is also taken into account here)
        # if len(times) > 1:
        #     print 'Cutoff set: routes with {}s exceeding the time window ({}) will be ignored (incl. time to reach OSM-net)'.format(time_note, times[-1])
        #     cutoff = times[-1]
        #     self.request.setCutoffTime(cutoff.year, cutoff.month, cutoff.day, cutoff.hour, cutoff.minute, cutoff.second)
        # iterate all times
        results = []  # dimension (if not merged): times x targets (origins resp. destinations)
        sdf = SimpleDateFormat('HH:mm:ss')
        sdf.setTimeZone(TimeZone.getTimeZone("GMT +2"))
        for t, date_time in enumerate(times):
            # compare seconds since epoch (different ways to get it from java/python date)
            # NOTE(review): time_since_epoch is computed but never used below
            epoch = datetime.utcfromtimestamp(0)
            time_since_epoch = (date_time - epoch).total_seconds()
            self.request.setDateTime(date_time.year, date_time.month, date_time.day, date_time.hour, date_time.minute, date_time.second)
            # has to be set every time after setting datetime (and also AFTER setting arriveby)
            self.request.setMaxTimeSec(max_time)
            msg = 'Starting evaluation of routes with ' + time_note + date_time.strftime(DATETIME_FORMAT)
            print msg
            results_dt = self.batch_processor.evaluate(self.request)
            # if there already was a calculation: merge it with new results
            # NOTE(review): in the visible code nothing ever appends results_dt
            # to `results`, so this merge branch never runs and the final
            # flattened write is empty -- confirm against the full file.
            if do_merge and len(results) > 0:
                for i, prev_result in enumerate(results[0]):
                    if prev_result is not None:
                        prev_result.merge(results_dt[i])
            #write and append if no merging is needed (saves memory)
            else:
                search_time = sdf.format(date_time)
                csv_writer.write(results_dt, additional_columns={'search_time': search_time}, append=True)
                for r in results_dt:
                    del(r)
        if do_merge:
            # flatten the results
            results = [r for res in results for r in res]
            csv_writer.write(results, append=False)
def hdfs_setupjob(job, args):
    """
    Similar to the above, but run telemetry data that's already been
    exported to HDFS.

    Jobs expect two arguments, startdate and enddate, both in yyyyMMdd format.
    """
    import java.text.SimpleDateFormat as SimpleDateFormat
    import java.util.Date as Date
    import java.util.Calendar as Calendar
    import java.util.concurrent.TimeUnit as TimeUnit
    import com.mozilla.util.DateUtil as DateUtil
    import com.mozilla.util.DateIterator as DateIterator
    import org.apache.hadoop.mapreduce.lib.input.FileInputFormat as FileInputFormat
    import org.apache.hadoop.mapreduce.lib.input.SequenceFileAsTextInputFormat as MyInputFormat

    if len(args) != 2:
        raise Exception("Usage: <startdate-YYYYMMDD> <enddate-YYYYMMDD>")

    # collects every day (as epoch millis) in the requested range
    class DayCollector(DateIterator):
        def __init__(self):
            self._days = []
        def get(self):
            return self._days
        def see(self, aTime):
            self._days.append(aTime)

    parser = SimpleDateFormat(dateformat)
    hdfs_formatter = SimpleDateFormat(hdfs_dateformat)
    start_cal = Calendar.getInstance()
    start_cal.setTime(parser.parse(args[0]))
    end_cal = Calendar.getInstance()
    end_cal.setTime(parser.parse(args[1]))

    # HDFS only contains the last 2 weeks of data (up to yesterday)
    start_millis = start_cal.getTimeInMillis()
    end_millis = end_cal.getTimeInMillis()
    now_millis = Calendar.getInstance().getTimeInMillis()
    if TimeUnit.DAYS.convert(now_millis - start_millis, TimeUnit.MILLISECONDS) > 14:
        raise Exception("HDFS Data only includes the past 14 days of history. Try again with more recent dates or use the HBase data directly.")
    if TimeUnit.DAYS.convert(now_millis - end_millis, TimeUnit.MILLISECONDS) < 1:
        raise Exception("HDFS Data only includes data up to yesterday. For (partial) data for today, use the HBase data directly.")

    days = DayCollector()
    DateUtil.iterateByDay(start_millis, end_millis, days)
    paths = [hdfs_pathformat % (hdfs_formatter.format(Date(d))) for d in days.get()]

    job.setInputFormatClass(MyInputFormat)
    FileInputFormat.setInputPaths(job, ",".join(paths))