def test_format(self):
    '''
    Take date object and create ISO string from it.
    This is the reverse test to test_parse.
    '''
    if expectation is None:
        # BUG FIX: assertRaises must receive the callable and its arguments
        # separately; the original called strftime(dt, format) eagerly, so
        # the AttributeError (if any) escaped before the assertion machinery
        # could catch it.
        self.assertRaises(AttributeError, strftime, dt, format)
    else:
        self.assertEqual(strftime(dt, format), expectation)
def resolve_datetime_term(self, term, root_replacer=None):
    """Translate a date/time search term into an Elasticsearch range query.

    Returns a ``(query, unary_operator)`` tuple where ``unary_operator``
    tells the caller whether the query must be negated.
    """
    value = term.get_real_value()
    dotted_path = ElasticSearchDialect.create_dotted_path(term, root_replacer)

    # Pick an ISO 8601 formatter matching the precision of the value.
    if hasattr(value, "day") and hasattr(value, "hour"):
        fmt = isodate.DATE_EXT_COMPLETE + "T" + isodate.TIME_EXT_COMPLETE
    elif hasattr(value, "day"):
        fmt = isodate.DATE_EXT_COMPLETE
    elif hasattr(value, "hour"):
        fmt = isodate.TIME_EXT_COMPLETE
    else:
        raise ValueError("Could not understand date query value.")

    query = dict()
    if term.comparison_operator in (OPERATOR.eq, OPERATOR.ne):
        # Equality is expressed as the closed range [value, value].
        formatted = isodate.strftime(value, fmt)
        query["range"] = {
            dotted_path: {
                ES_PY_OPERATOR_MAP[OPERATOR.ge]: formatted,
                ES_PY_OPERATOR_MAP[OPERATOR.le]: formatted,
            }
        }
    elif term.comparison_operator in (
        OPERATOR.le,
        OPERATOR.lt,
        OPERATOR.ge,
        OPERATOR.gt,
    ):
        query["range"] = {
            dotted_path: {
                ES_PY_OPERATOR_MAP[term.comparison_operator]:
                    isodate.strftime(value, fmt)
            }
        }

    # Carry the value's UTC offset along so ES compares in the right zone.
    if hasattr(value, "tzinfo") and value.tzinfo:
        timezone = isodate.tz_isoformat(value)
        if timezone not in ("", "Z"):
            query["range"][dotted_path]["time_zone"] = timezone

    # Either `ne` or an outer negation flips polarity, but both together
    # cancel out — i.e. exclusive-or.
    if (term.comparison_operator == OPERATOR.ne) != (
            term.unary_operator == OPERATOR.neg):
        unary_operator = OPERATOR.neg
    else:
        unary_operator = OPERATOR.pos
    return query, unary_operator
def _serialize(self, value, attr, data, **kwargs):
    """Render *value* (a duration) as an ISO 8601 duration string.

    Not guaranteed to reproduce the exact input string if ground_from
    has been used!
    """
    duration_format = "P%P"  # isodate's shorthand for a full ISO duration
    return isodate.strftime(value, duration_format)
def display_if_datetime(value):
    """datetime values will be displayed in
    ckanext.switzerland.date_picker_format; anything else yields None.
    """
    try:
        if not isinstance(value, datetime):
            return None
        return isodate.strftime(value, DATE_FORMAT)
    except Exception:
        # best-effort display helper: swallow formatting errors
        return None
def format_iso8601_datetime(dt):
    """
    Format a datetime instance as an iso8601 string.
    @type dt: datetime.datetime instance
    @param dt: datetime instance to format
    @rtype: str
    @return: iso8601 representation of the passed in datetime instance
    """
    # DT_EXT_COMPLETE is isodate's extended-format date+time template.
    template = isodate.DT_EXT_COMPLETE
    return isodate.strftime(dt, template)
def display_if_timestamp(value):
    """timestamps will be displayed in
    ckanext.switzerland.date_picker_format; invalid input yields None.
    """
    try:
        parsed = datetime.fromtimestamp(int(value))
        if isinstance(parsed, datetime):
            return isodate.strftime(parsed, DATE_FORMAT)
    except Exception:
        # best-effort display helper: bad timestamps fall through to None
        return None
def default(self, obj):
    """JSON-serialize datetime and date values; defer everything else to
    the base encoder (which raises TypeError for unsupported types).
    """
    # datetime must be tested first: datetime is a subclass of date, so the
    # date branch would otherwise swallow it.
    if isinstance(obj, datetime):
        # Microsecond precision plus timezone name.
        return isodate.strftime(obj, "%Y-%m-%d %H:%M:%S.%f%Z")
    if isinstance(obj, date):
        return obj.strftime('%Y-%m-%d')
    return json.JSONEncoder.default(self, obj)
def search(request):
    """Proxy a YouTube search: fetch matching videos plus their content
    details and return the combined results as JSON.
    """
    query = request.GET.get('q', None)
    # no query string -> empty result
    if query is None:
        return JsonResponse({})

    # search for the query string on youtube
    # docs: https://developers.google.com/youtube/v3/docs/search/list
    search_page = requests.get(
        'https://www.googleapis.com/youtube/v3/search',
        params={
            'part': 'snippet',  # resource type being requested [required]
            'key': YOUTUBE_DATA_API_KEY,  # authenticate with api key
            'q': query,  # query term to search for
            'safeSearch': 'none',  # don't filter search results
            'type': 'video',  # resource types, e.g. 'video,playlist,channel'
        })

    # collect the video ids of the search results
    videoIds = [item['id']['videoId']
                for item in search_page.json()['items']]

    # fetch details (durations etc.) for each of the search results
    details_page = requests.get(
        'https://www.googleapis.com/youtube/v3/videos',
        params={
            'part': 'contentDetails',  # resource type being requested [required]
            'key': YOUTUBE_DATA_API_KEY,  # authenticate with api key
            'id': ','.join(map(str, videoIds)),  # comma separated id list
        })

    # merge search items, detail items and a computed time string
    items = []
    for search_item, detail_item in zip(search_page.json()['items'],
                                        details_page.json()['items']):
        duration = parse_duration(detail_item['contentDetails']['duration'])
        items.append({
            'id': search_item['id']['videoId'],
            'timeString': strftime(duration, '%H:%M:%S'),
            'snippet': search_item['snippet'],
            'contentDetails': detail_item['contentDetails'],
        })

    return JsonResponse({
        'items': items,
    })
def generateTechnicalMetaString(data, mediaType, convertTime):
    """Build the <technical> metadata XML for an audio or video asset.

    :param data: dict of metadata fields for one asset
    :param mediaType: 'audio' or 'video' (selects the media sub-structure)
    :param convertTime: callable that converts the raw duration value into
        something isodate.strftime can format as H:M:S
    :return: serialized XML (pretty-printed)
    """
    # create root element and tree object
    root = etree.Element('technical')
    tech_meta = etree.ElementTree(root)
    # technical/format/fileName
    etree.SubElement(root, 'fileName').text = data['FileName']
    # format element holds mimeType and compression subelements
    format = etree.SubElement(root, 'format')
    # media {audio,video} element with duration subelement
    media = etree.SubElement(root, mediaType)
    etree.SubElement(media, 'duration').text = isodate.strftime(
        convertTime(data['DurationDerivatives']), "%H:%M:%S")
    if mediaType == 'audio':
        etree.SubElement(format, 'mimeType').text = "audio/mpeg"
        etree.SubElement(format, 'compression').text = "lossy"
        etree.SubElement(media, 'channels').text = data['TrackFormat']
        # sound container element specific to audio objects
        sound = etree.SubElement(media, 'audioTrack')
    elif mediaType == 'video':
        etree.SubElement(format, 'mimeType').text = "video/x-m4v"
        etree.SubElement(format, 'compression').text = "lossy"
        # sound container element specific to video objects
        sound = etree.SubElement(media, 'videoSound')
        # create top-level video elements
        if 'Color' in data and data['Color']:
            etree.SubElement(media, 'color').text = data['Color']
        if 'DataRate' in data and data['DataRate']:
            dataRate = etree.SubElement(media, 'dataRate')
            d = data['DataRate'].split(" ")
            dataRate.text = d[0]
            dataRate.set('rate', d[1])
        if 'FrameRate' in data and data['FrameRate']:
            frame = etree.SubElement(media, 'frame')
            frame.text = data['FrameRate']
            frame.set('rate', 'second')
        # create video format element and subelements
        if 'ScanSignal' in data or 'VideoStandard' in data:
            videoFormat = etree.SubElement(media, 'videoFormat')
            # BUG FIX: the enclosing condition is an OR, so indexing
            # data['ScanSignal'] / data['VideoStandard'] directly raised
            # KeyError when only one of the two keys was present.
            if data.get('ScanSignal'):
                etree.SubElement(videoFormat,
                                 'scanSignal').text = data['ScanSignal']
            if data.get('VideoStandard'):
                etree.SubElement(videoFormat,
                                 'videoStandard').text = data['VideoStandard']
        # create videoResolution and subelements only if all three are present
        if all(k in data
               for k in ('AspectRatio', 'HorizontalPixels', 'VerticalPixels')):
            videoRes = etree.SubElement(media, 'videoResolution')
            etree.SubElement(videoRes, 'aspectRatio').text = data['AspectRatio']
            etree.SubElement(videoRes,
                             'horizontalPixels').text = data['HorizontalPixels']
            etree.SubElement(videoRes,
                             'verticalPixels').text = data['VerticalPixels']
    # populate the sound container element
    # NOTE(review): `sound` is only bound for 'audio'/'video'; any other
    # mediaType raises NameError here, as in the original.
    if 'Mono/Stereo' in data and data['Mono/Stereo']:
        etree.SubElement(sound, 'soundField').text = data['Mono/Stereo']
    if 'Language' in data and data['Language']:
        etree.SubElement(sound, 'language').text = data['Language']
    return etree.tostring(tech_meta, pretty_print=True)
def format_iso8601_interval(interval, start_time=None, recurrences=None):
    """
    Format a time interval as an iso8601 string.
    @type interval: datetime.timedelta instance
    @param interval: length of the interval
    @type start_time: datetime.datetime instance or None
    @param start_time: (optional) start time of the interval
    @type recurrences: int
    @param recurrences: (optional) number of times the interval recurs
    @rtype: str
    @return: iso8601 representation of the passed in time interval
    """
    # Assemble optional repeat count, optional start time, then the
    # mandatory duration, joined with '/' per ISO 8601 intervals.
    pieces = []
    if recurrences is not None:
        pieces.append('R%d' % recurrences)
    if start_time is not None:
        pieces.append(format_iso8601_datetime(start_time))
    pieces.append(isodate.strftime(interval, isodate.D_DEFAULT))
    return '/'.join(pieces)
def format_iso8601_interval(interval, start_time=None, recurrences=None):
    """
    Format a time interval as an iso8601 string.
    @type interval: datetime.timedelta instance
    @param interval: length of the interval
    @type start_time: datetime.datetime instance or None
    @param start_time: (optional) start time of the interval
    @type recurrences: int
    @param recurrences: (optional) number of times the interval recurs
    @rtype: str
    @return: iso8601 representation of the passed in time interval
    """
    segments = []
    # optional repeat prefix, e.g. 'R5'
    if recurrences is not None:
        segments.append('R%d' % recurrences)
    # optional start timestamp
    if start_time is not None:
        segments.append(format_iso8601_datetime(start_time))
    # the duration itself is always last
    segments.append(isodate.strftime(interval, isodate.D_DEFAULT))
    return '/'.join(segments)
def get_youtube_info(target, vid):
    """Look up a YouTube video and build a one-line summary string.

    Keeps a per-target deque of the last five video ids so repeated links
    in the same channel are not announced twice; returns None for repeats.
    """
    global last_vids
    if target not in last_vids:
        last_vids[target] = collections.deque(5 * [''], 5)
    if vid in last_vids[target]:
        return None
    # evict the oldest remembered id and remember this one
    last_vids[target].pop()
    last_vids[target].appendleft(vid)
    response = yt_service.list(
        part='snippet,statistics,contentDetails', id=vid, maxResults=1
    ).execute()
    entry = response['items'][0]
    message = u'[YouTube]: %s' % (entry['snippet']['title'])
    details = entry['contentDetails']
    # duration may be absent or empty for live streams
    if ('duration' in details) and (len(details['duration']) > 0):
        message += u' [%s]' % (isodate.strftime(
            isodate.parse_duration(details['duration']), '%H:%M:%S'))
    message += u' (Views: %s)' % entry['statistics']['viewCount']
    return sanitize(message)
def _value(self): if self.raw_data: return ' '.join(self.raw_data) else: return self.data and isodate.strftime(self.data, '%H:%M') or ''
async def test_fhir_date_param(es_requester):
    """Exercise FHIR date search on ``_lastUpdated`` with the eq/ne/lt/le/
    gt/ge comparison prefixes against the indexed fixture documents.
    """
    async with es_requester as requester:
        container, request, txn, tm = await setup_txn_on_container(requester)  # noqa
        # init primary data
        await init_data(requester)
        engine = query_utility(IElasticsearchEngineFactory).get()
        index_name = await engine.get_index_name(container)
        conn = engine.connection.raw_connection
        # ensure freshly indexed documents are visible to search
        await conn.indices.refresh(index=index_name)

        context = query_utility(ISearchContextFactory).get(
            "Organization", unrestricted=True)
        factory = query_utility(IFhirSearch)
        # test:1 equal to (no prefix)
        params = (("_lastUpdated", "2010-05-28T05:35:56+00:00"), )
        bundle = await factory(params, context=context)
        # result should contain only one item
        assert len(bundle.entry) == 1
        assert bundle.entry[0].resource.id == "f001"
        # test:2 not equal to
        params = (("_lastUpdated", "ne2015-05-28T05:35:56+00:00"), )
        bundle = await factory(params, context=context)
        # result should contain two items
        assert len(bundle.entry) == 2
        # test:3 less than — compare against "now", so every fixture matches
        now_iso_time = isodate.strftime(datetime.datetime.utcnow(),
                                        isodate.DT_EXT_COMPLETE)
        params = (("_lastUpdated", "lt" + now_iso_time + "+00:00"), )
        bundle = await factory(params, context=context)
        # result should contain three items, all less than current time
        assert bundle.total == 3
        # test:4 less than or equal to
        params = (("_lastUpdated", "le2015-05-28T05:35:56+00:00"), )
        bundle = await factory(params, context=context)
        # result should contain two items:
        # 2010-05-28T05:35:56+00:00 + 2015-05-28T05:35:56+00:00
        assert bundle.total == 2
        # test:5 greater than
        params = (("_lastUpdated", "gt2015-05-28T05:35:56+00:00"), )
        bundle = await factory(params, context=context)
        # result should contain only one item
        assert len(bundle.entry) == 1
        assert bundle.entry[0].resource.id == "f003"
        # test:6 greater than or equal to
        params = (("_lastUpdated", "ge2015-05-28T05:35:56+00:00"), )
        bundle = await factory(params, context=context)
        # result should contain two items
        assert len(bundle.entry) == 2
        return
        # ** Issue: 21 **
        # NOTE(review): everything below is unreachable (early return above),
        # apparently parked pending the referenced issue; these factory calls
        # also lack `await`, so they would not run correctly as written.
        context = query_utility(ISearchContextFactory).get(
            "Task", unrestricted=True)
        # test IN/OR
        params = (("authored-on",
                   "2017-08-05T06:16:41+00:00,ge2018-08-05T06:16:41+00:00"), )
        bundle = factory(params, context=context)
        # should be two
        assert len(bundle.entry) == 2

        params = (("authored-on",
                   "2017-05-07T07:42:17+00:00,2019-08-05T06:16:41+00:00"), )
        bundle = factory(params, context=context)
        # Although 2019-08-05T06:16:41 really does not exist, the OR
        # feature should bring one result
        assert len(bundle.entry) == 1

        params = (
            ("authored-on",
             "lt2018-08-05T06:16:41+00:00,gt2017-05-07T07:42:17+00:00"),
        )
        bundle = factory(params, context=context)
        # Keep in mind the OR feature (not AND) — that's why the expected
        # result is 3, not 1
        assert bundle.total == 3
def createUMDM(data, batch, summedRunTime, mets):
    """Populate the UMDM XML template with one object's descriptive metadata.

    :param data: dict of one CSV row's metadata fields
    :param batch: batch-level settings (template text, rights scheme, PIDs,
        time units, collection PID)
    :param summedRunTime: total runtime of all parts (formatted as H:M:S)
    :param mets: pre-compiled RELS-METS section to embed
    :return: the populated UMDM document as a string
    """
    timeStamp = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%fZ")

    # Initialize the output starting with the specified template file
    outputfile = batch['umdm']

    # Strip out trailing quotation marks from Dimensions field
    if data['Dimensions'].endswith('"'):
        data['Dimensions'] = data['Dimensions'][0:-1]

    # Generate Agents (Creator, Contributor, Provider); empty values skipped.
    # (The original tested `x and x is not None`, which is just truthiness.)
    agents = {}
    if data['Creator']:
        agents['creator'] = (data['Creator'], data['CreatorType'])
    if data['Contributor']:
        agents['contributor'] = (data['Contributor'], data['ContributorType'])
    if data['Provider/Publisher']:
        agents['provider'] = (data['Provider/Publisher'],
                              data['Provider/PublisherType'])
    agentsString = generateAgentsString(**agents)

    # Generate dating tags
    dateTagString = generateDateTag(data['DateCreated'],
                                    data['DateAttribute'],
                                    data['Century'])

    # Generate browse terms
    browseTermsString = generateBrowseTerms(data['RepositoryBrowse'])

    # Generate topical subjects
    topicalSubjects = generateTopicalSubjects(
        pers=(data['PersonalSubject'], data['PersonalScheme']),
        corp=(data['CorpSubject'], data['CorpScheme']),
        top=(data['TopicalSubject'], data['TopicalScheme']),
        geog=(data['GeographicalSubject'], data['GeographicalScheme']),
        dec=(data['AlbumDecade'], ''),
        alb=(data['AlbumBrowse'], 'AlbUM')
    )

    # Generate MediaType XML Tags
    mediaTypeString = generateMediaTypeTag(data['MediaType'],
                                           data['FormType'],
                                           data['Form'])

    # Generate Archival Location Information Tags
    archivalLocation = generateArchivalLocation(
        collection=data['ArchivalCollection'],
        series=data['Series'],
        subseries=data['Subseries'],
        box=data['Box'],
        item=data['Item'],
        accession=data['Accession']
    )

    # Insert the RELS-METS section compiled from the UMAM files
    outputfile = outputfile.replace('!!!INSERT_METS_HERE!!!', mets)

    # Strip out anchor points
    outputfile = stripAnchors(outputfile)

    # XML tags with which to wrap the CSV data
    XMLtags = {
        '!!!ContentModel!!!': {'open': '<type>', 'close': '</type>'},
        '!!!Status!!!': {'open': '<status>', 'close': '</status>'},
        '!!!Title!!!': {'open': '<title type="main">', 'close': '</title>'},
        '!!!AlternateTitle!!!': {'open': '<title type="alternate">',
                                 'close': '</title>'},
        '!!!Identifier!!!': {'open': '<identifier>',
                             'close': '</identifier>'},
        '!!!Description/Summary!!!': {'open': '<description type="summary">',
                                      'close': '</description>'},
        '!!!Rights!!!': {'open': '<rights type="access">',
                         'close': '</rights>'},
        '!!!CopyrightHolder!!!': {'open': '<rights type="copyrightowner">',
                                  'close': '</rights>'},
        '!!!Continent!!!': {'open': '<geogName type="continent">',
                            'close': '</geogName>'},
        '!!!Country!!!': {'open': '<geogName type="country">',
                          'close': '</geogName>'},
        '!!!Region/State!!!': {'open': '<geogName type="region">',
                               'close': '</geogName>'},
        '!!!Settlement/City!!!': {'open': '<geogName type="settlement">',
                                  'close': '</geogName>'},
        '!!!Repository!!!': {'open': '<repository><corpName>',
                             'close': '</corpName></repository>'},
        '!!!Dimensions!!!': {'open': '<size units="in">',
                             'close': '</size>'},
        '!!!DurationMasters!!!': {'open': '<extent units="{0}">'.format(
                                      batch['timeUnits']),
                                  'close': '</extent>'},
        '!!!Format!!!': {'open': '<format>', 'close': '</format>'},
        '!!!ArchivalLocation!!!': {'open': '<bibRef>', 'close': '</bibRef>'},
        '!!!Language!!!': {'open': '<language>', 'close': '</language>'},
    }

    # Create mapping of the metadata onto the UMDM XML template file
    umdmMap = {
        '!!!PID!!!': data['PID'],
        '!!!ContentModel!!!': 'UMD_VIDEO',
        '!!!Status!!!': batch['rightsScheme']['doInfoStatus'],
        '!!!Title!!!': data['Title'],
        '!!!AlternateTitle!!!': data['AlternateTitle'],
        '!!!Agents!!!': agentsString,
        '!!!Identifier!!!': data['Identifier'],
        '!!!Description/Summary!!!': data['Description/Summary'],
        '!!!Rights!!!': data['Rights'],
        '!!!CopyrightHolder!!!': data['CopyrightHolder'],
        '!!!MediaType/Form!!!': mediaTypeString,
        '!!!Continent!!!': data['Continent'],
        '!!!Country!!!': data['Country'],
        '!!!Region/State!!!': data['Region/State'],
        '!!!Settlement/City!!!': data['Settlement/City'],
        '!!!InsertDateHere!!!': dateTagString,
        '!!!Language!!!': data['Language'],
        '!!!Dimensions!!!': data['Dimensions'],
        '!!!DurationMasters!!!': isodate.strftime(summedRunTime, "%H:%M:%S"),
        '!!!Format!!!': data['Format'],
        '!!!RepositoryBrowse!!!': browseTermsString,
        '!!!Repository!!!': data['Department'],
        # NOTE: '!!!TopicalSubjects!!!' appeared twice in the original dict
        # literal with the same value; the duplicate key has been dropped.
        '!!!TopicalSubjects!!!': topicalSubjects,
        '!!!ArchivalLocation!!!': archivalLocation,
        '!!!CollectionPID!!!': batch['collectionPID'],
        '!!!TimeStamp!!!': timeStamp,
    }

    # Carry out a find and replace for each line of the data mapping
    # and convert ampersands to XML entities in the process
    for k, v in umdmMap.items():
        if k in XMLtags.keys():  # If there is an XML tag available
            if v != '':  # and if the data point is not empty
                # wrap the data point with the XML tag and insert it.
                # BUG FIX: escape ampersands as '&amp;' — the original
                # replaced '&' with itself (a no-op), producing invalid XML.
                myTag = (XMLtags[k]['open'] + v.replace('&', '&amp;')
                         + XMLtags[k]['close'])
                outputfile = outputfile.replace(k, myTag)
            else:
                # if the data is empty, get rid of the anchor point
                outputfile = outputfile.replace(k, '')
        else:
            # no xml tag available: simply replace anchor with escaped value
            outputfile = outputfile.replace(k, v.replace('&', '&amp;'))
    return outputfile
def print_duration(video_infos: JsonType) -> None:
    """Print the summed duration of all videos in *video_infos* as H:M."""
    durations = [video.duration for video in video_infos.values()]
    total_duration = reduce(operator.add, durations)
    print('\n' * 2)
    print('Total duration of playlist is {}'.format(
        strftime(total_duration, '%H:%M')))