def get_occurences(self):
    """Return the list of occurrence dicts for this recurring event.

    Expands the event's recurrence rule into parallel sequences of
    start/end datetimes and localizes each pair into display strings.
    Each entry carries 'start_date', 'end_date', 'start_time',
    'end_time', 'same_day' and 'same_time'.
    """
    # NOTE: removed a large block of dead, commented-out catalog-based
    # implementation that was kept as a bare string literal.
    rrules = self.context.recurrence
    starts = IRecurringSequence(RRuleICal(self.context.start(), rrules))
    ends = IRecurringSequence(RRuleICal(self.context.end(), rrules))
    events = map(
        lambda start, end: dict(
            start_date=ulocalized_time(
                start, False, time_only=None, context=self.context),
            end_date=ulocalized_time(
                end, False, time_only=None, context=self.context),
            start_time=ulocalized_time(
                start, False, time_only=True, context=self.context),
            end_time=ulocalized_time(
                end, False, time_only=True, context=self.context),
            same_day=event_util.isSameDay(self.context),
            same_time=event_util.isSameTime(self.context),
        ),
        starts, ends
    )
    return events
def ticket_title_generator(obj):
    """Generate a title for the ticket, also using event information.

    Returns a dict with 'title', 'eventtitle', 'eventstart' and
    'eventend'. For ticket occurrences the corresponding (transient)
    event occurrence is traversed to; if it no longer exists, the bare
    defaults are returned.
    """
    event = obj
    ret = {
        'title': obj.title,
        'eventtitle': '',
        'eventstart': '',
        'eventend': ''
    }

    if ITicketOccurrence.providedBy(event):
        event = aq_parent(aq_parent(event))
        # Traverse to the Occurrence object
        if IATEvent.providedBy(event):
            # get the request out of thin air to be able to publishTraverse to
            # the transient Occurrence object.
            traverser = OccTravAT(event, getRequest())
        elif IDXEvent.providedBy(event):
            # TODO
            traverser = OccTravDX(event, getRequest())
        else:
            raise NotImplementedError(
                u"There is no event occurrence traverser implementation for "
                u"this kind of object."
            )
        try:
            event = traverser.publishTraverse(getRequest(), obj.id)
        except KeyError:
            # Maybe the ticket occurrence isn't valid anymore because the
            # event occurrence doesn't exist anymore.
            # Just ignore that case.
            return ret
    elif ITicket.providedBy(event):
        event = aq_parent(event)

    if IEvent.providedBy(event) or IOccurrence.providedBy(event):
        acc = IEventAccessor(event)
        lstart = ulocalized_time(
            DT(acc.start),
            long_format=True,
            context=event
        )
        # BUGFIX: the end label was previously built from acc.start,
        # so titles always showed the start date twice.
        lend = ulocalized_time(
            DT(acc.end),
            long_format=True,
            context=event
        )
        # XXX: no unicode, store as utf-8 encoded string instead
        ret = dict(
            title=u'%s - %s (%s - %s)' % (
                safe_unicode(acc.title),
                safe_unicode(obj.title),
                lstart,
                lend,
            ),
            eventtitle=acc.title,
            eventstart=acc.start,
            eventend=acc.end,
        )
    return ret
def prepare_for_display(context, start, end, whole_day):
    """Build a dict of pre-formatted strings for an event's date range.

    Keys:
      'start_date' / 'end_date' - localized date strings
      'start_time' / 'end_time' - localized time strings (None for
                                  whole-day events)
      'start_iso'  / 'end_iso'  - ISO-formatted start/end
      'same_day'                - event ends on the same day it starts
      'same_time'               - event ends at the same time it starts
    """
    # ulocalized_time() is odd with time_only: with long_format=False,
    # both time_only=True and time_only=False return the time part, e.g.
    # u'14:40'; only time_only=None returns the date part, e.g.
    # u'16.03.2010'. Hence date and time are formatted separately below.
    dt_start = DT(start)
    dt_end = DT(end)

    result = {
        'start_date': ulocalized_time(dt_start, long_format=False,
                                      time_only=None, context=context),
        'start_time': ulocalized_time(dt_start, long_format=False,
                                      time_only=True, context=context),
        'end_date': ulocalized_time(dt_end, long_format=False,
                                    time_only=None, context=context),
        'end_time': ulocalized_time(dt_end, long_format=False,
                                    time_only=True, context=context),
        'start_iso': start.isoformat(),
        'end_iso': end.isoformat(),
        'same_day': is_same_day(start, end),
        'same_time': is_same_time(start, end),
    }

    # Whole-day events carry no time component at all.
    if whole_day:
        result['start_time'] = result['end_time'] = None

    return result
def update(self):
    """Prefill the start-date and subject widgets after form setup."""
    super(CampaignForm, self).update()
    today = datetime.date.today()

    # Default the start widget to the context's start date, or to today
    # when no start date has been set yet.
    date = self.context.start
    if date is None:
        date = today
    self.widgets['start'].value = (date.year, date.month, date.day)

    subject = self.widgets['subject']
    if not subject.value:
        value = self.context.subject or self.context.Title().decode('utf-8')
        localized_date = ulocalized_time(
            DateTime(), context=self.context, request=self.request
        ).lstrip('0')
        subject.value = _(
            u"${subject} ${date}",
            mapping={'subject': value, 'date': localized_date}
        )
def ulocalized_time(self, time, long_format=None, time_only=None):
    """Localize *time* via the global ulocalized_time helper.

    Returns None for a falsy *time*. A time of exactly midnight is
    treated as date-only input, so the time part is suppressed by
    forcing long_format to False.
    """
    if not time:
        return None
    # no printing times if they were not specified in inputs
    if time.hour() + time.minute() + time.second() == 0:
        long_format = False
    return ulocalized_time(time, long_format, time_only,
                           self.context, 'bika', self.request)
def purchasable_until_message(self):
    """Translatable message stating until when tickets can be bought."""
    buyable_period = queryAdapter(self.context, IBuyablePeriod)
    expires = ulocalized_time(
        buyable_period.expires,
        long_format=1,
        context=self.context,
        request=self.request,
    )
    return _(u'ticket_purchasable_until_message',
             default=u'Tickets are purchasable until ${date}',
             mapping={'date': expires})
def __call__(self):
    """Return the patient's historic results as a JSON table.

    One row per capture date; each row maps the localized date plus one
    column per analysis series title to its result string.
    """
    dates, data = get_historicresults(self.context)
    datatable = []
    for andate in dates:
        datarow = {
            'date': ulocalized_time(andate, 1, None, self.context, 'bika')
        }
        for sampletype_row in data.itervalues():
            for anrow in sampletype_row['analyses'].itervalues():
                # Column name is the analysis series title; empty string
                # when there is no result for this date.
                datarow[anrow['title']] = \
                    anrow.get(andate, {}).get('result', '')
        datatable.append(datarow)
    return json.dumps(datatable)
def purchasable_as_of_message(self):
    """Translatable message stating when the item becomes purchasable."""
    buyable_period = queryAdapter(self.context, IBuyablePeriod)
    effective = ulocalized_time(
        buyable_period.effective,
        long_format=1,
        context=self.context,
        request=self.request,
    )
    return _(u'purchasable_as_of_message',
             default=u'Item is purchasable as of ${date}',
             mapping={'date': effective})
def toDisplay(event):
    """Return pre-formatted date/time strings for an event.

    Keys:
      'start_date' / 'end_date' - localized date strings
      'start_time' / 'end_time' - localized time strings (None for
                                  whole-day events)
      'same_day'                - event ends on the day it starts
      'same_time'               - event ends at the time it starts
    """
    # ulocalized_time() is odd with time_only: with long_format=False,
    # both time_only=True and time_only=False return the time part
    # (u'14:40'); only time_only=None returns the date (u'16.03.2010').
    start = event.start()
    end = event.end()
    display = dict(
        start_date=ulocalized_time(
            start, False, time_only=None, context=event),
        end_date=ulocalized_time(
            end, False, time_only=None, context=event),
        start_time=ulocalized_time(
            start, False, time_only=True, context=event),
        end_time=ulocalized_time(
            end, False, time_only=True, context=event),
        same_day=isSameDay(event),
        same_time=isSameTime(event),
    )
    # Whole-day events carry no time component.
    if event.getWholeDay():
        display['start_time'] = display['end_time'] = None
    return display
def __call__(self):
    """Return the last campaign record with a localized date.

    Returns None when no campaign has been recorded yet, or when the
    stored record has no date.
    """
    annotations = IAnnotations(self.context)
    campaign = annotations.get(LAST_CAMPAIGN)
    if not campaign or not campaign['date']:
        return None
    campaign['date'] = ulocalized_time(
        campaign['date'], long_format=True, context=self.context)
    return campaign
def issues(self):
    """List the gazette issues below this context, newest first.

    Each entry is a dict with 'title', 'url' and a localized 'date'.
    """
    context = aq_inner(self.context)
    catalog = getToolByName(context, 'portal_catalog')
    brains = catalog(
        portal_type='gazette.GazetteIssue',
        path='/'.join(context.getPhysicalPath()),
        sort_on='start',
        sort_order='reverse',
    )
    return [
        dict(
            title=brain.Title,
            url=brain.getURL(),
            date=ulocalized_time(brain.start, 1, context=context),
        )
        for brain in brains
    ]
def update(self):
    """Prefill the subject widget and hide interests when not selected.

    The subject defaults to "<context subject or title> <localized
    date>" when left empty; the interests widget is hidden unless the
    context opts into interest-group selection.
    """
    super(NewsletterForm, self).update()
    subject = self.widgets['subject']
    # Use isinstance instead of the fragile `type(x) == type('')`
    # comparison (also covers str subclasses); removed an unused
    # `today` local.
    if isinstance(subject.value, str):
        subject.value = subject.value.decode('utf-8')
    if not subject.value:
        value = self.context.subject or \
            self.context.Title().decode('utf-8')
        subject.value = _(
            u"${subject} ${date}",
            mapping={
                'subject': value,
                'date': ulocalized_time(
                    DateTime(),
                    context=self.context,
                    request=self.request
                ).lstrip('0')}
        )
    if not self.context.select_interest_groups:
        self.widgets['interests'].mode = HIDDEN_MODE
def formatDate(self, adate):
    """Localize *adate* as a long-format unicode string.

    Falls back to u'???' when the date cannot be formatted.
    """
    try:
        localized = ulocalized_time(
            adate, long_format=True, context=self.context)
    except ValueError:
        return u'???'
    return safe_unicode(localized)
def retractInvalidAnalyses(self):
    """Retract the analyses with validation pending status for which
    the instrument used failed a QC Test.

    Collects the instruments referenced by the selected reference
    analyses, retracts every analysis those instruments flag for
    retraction, and, if anything was retracted, attaches a PDF report
    to the reference analyses and emails it.
    """
    toretract = {}
    instruments = {}
    refs = []
    rc = getToolByName(self.context, REFERENCE_CATALOG)
    selected = WorkflowAction._get_selected_items(self)
    for uid in selected.iterkeys():
        # We need to do this instead of using the dict values
        # directly because all these analyses have been saved before
        # and don't know if they already had an instrument assigned
        an = rc.lookupObject(uid)
        if an.portal_type == 'ReferenceAnalysis':
            refs.append(an)
            instrument = an.getInstrument()
            if instrument and instrument.UID() not in instruments:
                instruments[instrument.UID()] = instrument

    for instr in instruments.itervalues():
        analyses = instr.getAnalysesToRetract()
        for a in analyses:
            if a.UID() not in toretract:
                # BUGFIX: key on a.UID() (the uid string); the previous
                # code used the unbound method `a.UID` as key, so the
                # membership check above never matched and duplicates
                # were not filtered.
                toretract[a.UID()] = a

    retracted = []
    for analysis in toretract.itervalues():
        try:
            # add a remark to this analysis
            failedtxt = ulocalized_time(DateTime(), long_format=0)
            failedtxt = '%s: %s' % (failedtxt,
                                    _("Instrument failed reference test"))
            analysis.setRemarks(failedtxt)
            # retract the analysis
            doActionFor(analysis, 'retract')
            retracted.append(analysis)
        except Exception:
            # Already retracted as a dependant from a previous one?
            pass

    if len(retracted) > 0:
        # Create the Retracted Analyses List
        rep = AnalysesRetractedListReport(self.context, self.request,
                                          self.portal_url,
                                          'Retracted analyses',
                                          retracted)

        # Attach the pdf to the ReferenceAnalysis (accessible
        # from Instrument's Internal Calibration Tests list
        pdf = rep.toPdf()
        for ref in refs:
            ref.setRetractedAnalysesPdfReport(pdf)

        # Send the email, best effort
        try:
            rep.sendEmail()
        except Exception:
            pass

        # TODO: show a window with the published AS results that used
        # the instrument since its last valid calibration, with the
        # emails and phone numbers of the associated contacts, for a
        # manual intervention
        pass
def ulocalized_time(self, time, long_format=None, time_only=None):
    """Delegate to the global ulocalized_time with bika defaults."""
    return ulocalized_time(
        time, long_format, time_only, self.context, 'bika', self.request)
def workflow_action_submit(self):
    """Submit results for the selected analyses.

    Saves edited remarks, instrument, method, analyst, uncertainty,
    detection limit, interim fields and result on each selected
    analysis, then fires the 'submit' transition for all of them at
    once. If a reference analysis with an out-of-range result was
    submitted, all routine analyses awaiting verification on the same
    instrument are retracted and a PDF report of the retractions is
    attached to the failing reference analyses and emailed.
    """
    uids = self.get_selected_uids()
    if not uids:
        message = _('No items selected.')
        self.context.plone_utils.addPortalMessage(message, 'info')
        self.request.response.redirect(self.context.absolute_url())
        return
    if not is_active(self.context):
        message = _('Item is inactive.')
        self.context.plone_utils.addPortalMessage(message, 'info')
        self.request.response.redirect(self.context.absolute_url())
        return

    form = self.request.form
    remarks = form.get('Remarks', [{}])[0]
    results = form.get('Result', [{}])[0]
    methods = form.get('Method', [{}])[0]
    instruments = form.get('Instrument', [{}])[0]
    analysts = self.request.form.get('Analyst', [{}])[0]
    uncertainties = self.request.form.get('Uncertainty', [{}])[0]
    dlimits = self.request.form.get('DetectionLimit', [{}])[0]

    # XXX combine data from multiple bika listing tables.
    # TODO: Is this necessary?
    item_data = {}
    if 'item_data' in form:
        if type(form['item_data']) == list:
            for i_d in form['item_data']:
                for i, d in json.loads(i_d).items():
                    item_data[i] = d
        else:
            item_data = json.loads(form['item_data'])

    # Store invalid instruments-ref.analyses
    invalid_instrument_refs = dict()

    # We manually query by all analyses uids at once here instead of using
    # _get_selected_items from the base class, cause that function fetches
    # the objects by uid, but sequentially one by one
    actions_pool = ActionsPool()
    query = dict(UID=uids, cancellation_state="active")
    for brain in api.search(query, CATALOG_ANALYSIS_LISTING):
        uid = api.get_uid(brain)
        analysis = api.get_object(brain)

        # Need to save remarks?
        if uid in remarks:
            analysis.setRemarks(remarks[uid])

        # Need to save the instrument?
        if uid in instruments:
            instrument = instruments[uid] or None
            analysis.setInstrument(instrument)
            if instrument and IReferenceAnalysis.providedBy(analysis):
                if is_out_of_range(analysis):
                    # This reference analysis is out of range, so we have
                    # to retract all analyses assigned to this same
                    # instrument that are awaiting for verification
                    if uid not in invalid_instrument_refs:
                        invalid_instrument_refs[uid] = set()
                    invalid_instrument_refs[uid].add(analysis)
                else:
                    # The reference result is valid, so make the instrument
                    # available again for further analyses
                    instrument.setDisposeUntilNextCalibrationTest(False)

        # Need to save the method?
        if uid in methods:
            method = methods[uid] or None
            analysis.setMethod(method)

        # Need to save the analyst?
        if uid in analysts:
            analysis.setAnalyst(analysts[uid])

        # Need to save the uncertainty?
        if uid in uncertainties:
            analysis.setUncertainty(uncertainties[uid])

        # Need to save the detection limit?
        analysis.setDetectionLimitOperand(dlimits.get(uid, ""))

        interims = item_data.get(uid, analysis.getInterimFields())
        analysis.setInterimFields(interims)
        # BUGFIX: look the result up by the analysis uid; the previous
        # code used the literal string 'uid' as key, so edited results
        # were never saved.
        analysis.setResult(results.get(uid, analysis.getResult()))

        # Add this analysis to the actions pool. We want to submit all them
        # together, when all have values set for results, interims, etc.
        actions_pool.add(analysis, "submit")

    # Submit all analyses
    actions_pool.resume()

    # If a reference analysis with an out-of-range result and instrument
    # assigned has been submitted, retract then routine analyses that are
    # awaiting for verification and with same instrument associated
    retracted = list()
    for invalid_instrument_uid in invalid_instrument_refs.keys():
        query = dict(
            getInstrumentUID=invalid_instrument_uid,
            portal_type=['Analysis', 'DuplicateAnalysis'],
            review_state='to_be_verified',
            cancellation_state='active',
        )
        brains = api.search(query, CATALOG_ANALYSIS_LISTING)
        for brain in brains:
            analysis = api.get_object(brain)
            failed_msg = '{0}: {1}'.format(
                ulocalized_time(DateTime(), long_format=1),
                _("Instrument failed reference test"))
            an_remarks = analysis.getRemarks()
            analysis.setRemarks('. '.join([an_remarks, failed_msg]))
            doActionFor(analysis, 'retract')
            retracted.append(analysis)

    # If some analyses have been retracted because instrument failed a
    # reference test, then generate a pdf report
    if retracted:
        # Create the Retracted Analyses List
        report = AnalysesRetractedListReport(self.context, self.request,
                                             self.portal_url,
                                             'Retracted analyses',
                                             retracted)

        # Attach the pdf to all ReferenceAnalysis that failed (accessible
        # from Instrument's Internal Calibration Tests list
        pdf = report.toPdf()
        # BUGFIX: the values of invalid_instrument_refs are *sets* of
        # reference analyses; the previous code iterated the sets as if
        # they were single analyses and called the setter on a set.
        for failed_refs in invalid_instrument_refs.values():
            for ref in failed_refs:
                ref.setRetractedAnalysesPdfReport(pdf)

        # Send the email, best effort
        try:
            report.sendEmail()
        except Exception:
            pass

    message = PMF("Changes saved.")
    self.context.plone_utils.addPortalMessage(message, 'info')
    self.destination_url = self.request.get_header(
        "referer", self.context.absolute_url())
    self.request.response.redirect(self.destination_url)
def formatUrls(with_forum=False, with_comment=False):
    """Build an HTML (or plain-text) listing of the related conversations.

    Closure over `rel_convs`, `conversations`, `portal` and `self`.
    NOTE(review): assumes rel_convs['convs'] holds conversation ids and
    rel_convs['cmts'] maps each id to its comments -- confirm at the
    definition site of rel_convs.
    """
    urls = ''
    for conv_id in rel_convs['convs']:
        convd = conversations[conv_id]
        title = convd['conv'].Title()
        if with_forum:
            # Prefix the conversation title with its forum title.
            title = '%s: %s' % (convd['forum'].Title(), title)
        if self.html_format:
            urls += '<h3><a href="%s%s">%s</a></h3>\n' % (portal.absolute_url(), conv_id, title)
        else:
            # Plain-text mode: just the absolute URL, no markup.
            urls += '%s%s\n' % (portal.absolute_url(), conv_id)
        if with_comment:
            comments = ''
            for comment in rel_convs['cmts'][conv_id]:
                creatorinfo = portal.portal_membership.getMemberInfo(comment.Creator())
                # "Posted by <fullname or creator id> at <localized creation date>",
                # with the labels translated via the ploneboard i18n domain and
                # everything utf-8 encoded for string concatenation.
                comments += '<li style="padding-left:2em;"><strong>%s %s %s %s</strong><br />\n' % (
                    translate('posted_by', 'ploneboard', context=self.REQUEST, default='Posted by').encode('utf8'),
                    creatorinfo is not None and creatorinfo['fullname'] or comment.Creator(),
                    translate('text_at', 'ploneboard', context=self.REQUEST, default='at').encode('utf8'),
                    ulocalized_time(comment.creation_date, long_format=True, context=self, request=self.REQUEST).encode('utf8'))
                comments += '%s\n</li>\n' % comment.getText()
            urls += '<ul>\n%s\n</ul>' % comments
    return urls
def ulocalized_time(self, *args, **kwargs):
    """Localize a date/time, always using this view's context."""
    kwargs = dict(kwargs, context=self.context)
    return ulocalized_time(*args, **kwargs)
def format_date(date, context):
    """Return *date* localized as a long-format date string."""
    return ulocalized_time(
        DT(date), long_format=True, time_only=None, context=context)
def _get_range_specs(analysis):
    """Build the result-range spec dict for *analysis*.

    Returns the results-range entry for the analysis' keyword, with a
    human readable 'rangecomment' ("min - max", "> min" or "< max",
    plus the error percentage when set) filled in if missing.
    """
    spec = analysis.getAnalysisSpecs()
    spec = spec.getResultsRangeDict() if spec else {}
    specs = spec.get(analysis.getKeyword(), {})
    if not specs.get('rangecomment', ''):
        if specs.get('min', '') and specs.get('max', ''):
            specs['rangecomment'] = '%s - %s' % (specs.get('min'),
                                                 specs.get('max'))
        elif specs.get('min', ''):
            specs['rangecomment'] = '> %s' % specs.get('min')
        elif specs.get('max', ''):
            specs['rangecomment'] = '< %s' % specs.get('max')
    if specs.get('error', '0') != '0' and specs.get('rangecomment', ''):
        specs['rangecomment'] = ('%s (%s' % (specs.get('rangecomment'),
                                             specs.get('error'))) + '%)'
    return specs


def get_historicresults(patient):
    """Collect all verified/published analysis results for *patient*.

    Returns a tuple (dates, rows): *dates* is the sorted list of
    localized result-capture dates, and *rows* maps each sample type
    UID to {'object': sampletype, 'analyses': {service_uid: asdict}},
    where asdict carries the service metadata, the applicable 'specs'
    and one result entry per capture date.
    """
    if not patient:
        return ([], {})

    rows = {}
    dates = []
    uid = patient.UID()
    states = ['verified', 'published']

    # Retrieve the AR IDs for the current patient. Fetch each brain's
    # object once (the original looked it up four times per brain).
    bc = getToolByName(patient, 'bika_catalog')
    ars = []
    for brain in bc(portal_type='AnalysisRequest', review_state=states):
        ar = brain.getObject()
        schema = ar.Schema()
        if 'Patient' not in schema:
            continue
        ar_patient = schema.getField('Patient').get(ar)
        if ar_patient and ar_patient.UID() == uid:
            ars.append(brain.id)

    # Retrieve all the analyses, sorted by ResultCaptureDate DESC
    bac = getToolByName(patient, 'bika_analysis_catalog')
    analyses = [brain.getObject()
                for brain in bac(portal_type='Analysis',
                                 getRequestID=ars,
                                 sort_on='getResultCaptureDate',
                                 sort_order='reverse')]

    # Build the dictionary of rows
    for analysis in analyses:
        sampletype = analysis.aq_parent.getSampleType()
        st_uid = sampletype.UID()
        row = rows.get(st_uid, {'object': sampletype, 'analyses': {}})
        anrow = row.get('analyses')

        service = analysis.getService()
        service_uid = service.UID()
        asdict = anrow.get(service_uid, {'object': service,
                                         'title': service.Title(),
                                         'keyword': service.getKeyword(),
                                         'units': service.getUnit()})

        date = analysis.getResultCaptureDate() or analysis.created()
        date = ulocalized_time(date, 1, None, patient, 'bika')
        # If more than one analysis of the same type has been
        # performed in the same datetime, get only the last one
        if date not in asdict:
            asdict[date] = {
                'object': analysis,
                'result': analysis.getResult(),
                'formattedresult': analysis.getFormattedResult(),
            }

            # Get the specs.
            # Only the specs applied to the last analysis for that
            # sample type will be taken into consideration.
            # We assume specs from previous analyses are obsolete.
            if 'specs' not in asdict:
                asdict['specs'] = _get_range_specs(analysis)

            if date not in dates:
                dates.append(date)

        anrow[service_uid] = asdict
        row['analyses'] = anrow
        rows[st_uid] = row

    dates.sort(reverse=False)
    return (dates, rows)
def created_ulocalized(self):
    """Localized long-format string for the creation date."""
    portal = api.get_portal()
    request = api.get_request()
    return ulocalized_time(self.created, long_format=True, context=portal,
                           request=request, domain="senaite.core")