def calcCoefficient(data, listA, listW, listLostFunction):
    """Fit a weight vector by gradient descent.

    Appends per-iteration learning rate, weights and loss to listA,
    listW and listLostFunction; returns the final weight vector.
    """
    N = len(data[0])  # dimension of a sample
    # fix: original read `[0 for in range(N)]` — a syntax error.
    w = [0 for _ in range(N)]
    wNew = [0 for _ in range(N)]
    g = [0 for _ in range(N)]
    times = 0
    alpha = 100.0  # learning rate, arbitrary initial value
    while times < 10000:
        for j in range(N):
            g[j] = gradient(data, w, j)
        normalize(g)  # normalize the gradient vector
        alpha = calcAlpha(w, g, alpha, data)
        numberProduct(alpha, g, wNew)
        print("times,alpha,fw,w,g:\t", times, alpha, fw(w, data), w, g)
        if isSame(w, wNew):
            break
        # fix: original called assign2(2, wNew) — the literal 2 meant the
        # weights were never copied back, so the loop could not converge.
        assign2(w, wNew)  # update weights
        times += 1
        listA.append(alpha)
        listW.append(assign(w))
        listLostFunction.append(fw(w, data))
    return w
def get_locale(s, enc=None):
    """Map an RFC-1766-style tag ('en-us') to a normalized locale name.

    If *enc* is given it is appended as the encoding suffix before
    normalization.
    """
    # fix: the original called s.replace('-', '_') and discarded the
    # result (str.replace returns a new string); that dead statement
    # is removed.
    fields = s.split('-', 1)
    if len(fields) == 1:
        # nothing after the first dash
        return locale.normalize(s)
    if enc:
        return locale.normalize('_'.join(fields) + '.' + enc)
    return locale.normalize('_'.join(fields))
def check(data):
    """Verify that every alias in *data* normalizes to its expected locale.

    Returns the number of mismatches, reporting each on stderr.
    """
    errors = 0
    for alias, expected in data.items():
        actual = locale.normalize(alias)
        if actual != expected:
            print('ERROR: %a -> %a != %a' % (alias, actual, expected),
                  file=sys.stderr)
            errors += 1
    return errors
def guess_locale_from_lang_posix(lang):
    # compatibility v6.0.4
    candidate = str(lang)
    if not is_valid_locale(candidate):
        # this works in Travis when locale support set by Travis suggestion
        base = locale.normalize(lang).split('.')[0]
        candidate = str(base + '.utf8')
        if not is_valid_locale(candidate):
            # http://thread.gmane.org/gmane.comp.web.nikola/337/focus=343
            candidate = str(base)
            if not is_valid_locale(candidate):
                candidate = None
    return candidate
def __init__(self, langcode):
    """LanguageCode Constructor.

    Accept three forms of language code: xx, xx_XX, xx-XX, case
    insensitive.
    """
    # Strip any encoding suffix and canonicalize xx-XX to xx_XX.
    langcode = langcode.split('.')[0].replace('-', '_')
    normalized = locale.normalize(langcode)
    # locale.normalize() echoes back codes it does not recognize;
    # treat an unchanged result as "unknown language".
    if normalized == langcode:
        self._langcode = None
    else:
        self._langcode = normalized.split('.')[0]
def render(self, context, instance, placeholder):
    """Populate the template context with Facebook plugin settings."""
    long_code = locale.normalize(get_language()).split('.')[0]
    context['instance'] = instance
    context['long_language_code'] = long_code
    context['facebook_app_id'] = getattr(settings, 'FACEBOOK_APP_ID', None)
    context['facebook_color_scheme'] = getattr(settings, 'FACEBOOK_COLOR_SCHEME', None)
    context['facebook_font'] = getattr(settings, 'FACEBOOK_FONT', None)
    return context
def __init__(self, dt, convert=False):
    """Wrap *dt*, converting to the app timezone when requested."""
    if convert and sickrage.app.config.timezone_display == 'local':
        self.dt = dt.astimezone(sickrage.app.tz)
    else:
        self.dt = dt
    # Assume locale support until a locale operation fails.
    self.has_locale = True
    self.en_US_norm = locale.normalize('en_US.utf-8')
def get_locale_conv(loc=None):
    """Return locale.localeconv() for *loc*, retrying recursively with
    progressively more general locales: full locale, language-only,
    then settings.LANGUAGE_CODE.

    Raises ImproperlyConfigured if even the default language fails.
    """
    if loc is None:
        loc = to_locale(get_language())
    startloc = loc
    # '-' is a language delimiter, not a locale, but people often mess that up
    if loc.find("-") > -1:
        loc = to_locale(loc)
    try:
        # log.debug('setting locale: %s', loc.encode('utf-8'))
        locale.setlocale(locale.LC_ALL, locale.normalize(loc))
        return locale.localeconv()
    except (locale.Error, ValueError):
        # darn, try a different path
        pos = loc.find("_")
        if pos > -1:
            # Retry with just the language part, e.g. 'de_CH' -> 'de'.
            loc = loc[:pos]
            return get_locale_conv(loc)
        else:
            # Last resort: the project's configured default language.
            loc = to_locale(settings.LANGUAGE_CODE)
            if loc != startloc and loc[: loc.find("_")] != startloc:
                log.warn(
                    u"Cannot set locale to '%s'. Using default locale '%s'.",
                    startloc.encode("utf-8"),
                    loc.encode("utf-8"),
                )
                return get_locale_conv(loc)
            else:
                log.fatal(u"Cannot set locale to default locale '%s'. Something is misconfigured.", loc.encode("utf-8"))
                raise ImproperlyConfigured("bad settings.LANGUAGE_CODE")
def _set_lang_locale(lang):
    """Try to switch LC_ALL to a locale matching *lang*.

    Returns a zero-argument function that restores the previous locale
    (a no-op lambda when *lang* is None).
    """
    if lang is None:
        return lambda: True
    # Get possible locales from explicit mapping,
    # try auto-resolution as lower priority (not very reliable).
    nlocnames = _lang_to_locale.get(lang, [])
    nlocname_auto = locale.normalize(lang + ".UTF-8")
    if nlocname_auto not in nlocnames:
        nlocnames.append(nlocname_auto)
    # Try to set one of the locales.
    oldloc = locale.getlocale()
    setlocname = None
    for nlocname in nlocnames:
        try:
            setlocname = locale.setlocale(locale.LC_ALL, nlocname)
            break
        except locale.Error:
            # fix: narrowed from a bare `except:` which would also have
            # swallowed KeyboardInterrupt/SystemExit.
            pass
    if setlocname is None and lang not in _no_locale_warning_issued:
        warning(p_("error message",
                   "cannot find a locale for language '%(lang)s', "
                   "tried: %(locales)s")
                % dict(lang=lang, locales=" ".join(nlocnames)))
        _no_locale_warning_issued[lang] = True
        locale.setlocale(locale.LC_ALL, oldloc)
    # Return reset function.
    return lambda: locale.setlocale(locale.LC_ALL, oldloc)
def hyphenate(value, arg=None, autoescape=None):
    """Insert soft hyphens into words of *value* longer than a minimum length.

    *arg* may be "langcode" or "langcode,minlen"; defaults to Django's
    active language and a minimum of 6 characters.
    """
    minlen = 6  # default minimal word length
    if arg:
        parts = arg.split(u',')
        code = parts[0]
        if len(parts) > 1:
            minlen = int(parts[1])  # caller override
    else:
        # No language specified, use Django's current
        code = get_language()
    # Normalize the locale code, ignoring a potential encoding suffix
    lang = locale.normalize(code).split('.')[0]
    # Make sure the proper language is installed
    if not dictools.is_installed(lang):
        dictools.install(lang)
    hyphenator = Hyphenator(lang)
    pieces = []
    for word in value.split(u' '):
        if len(word) > minlen and word.isalpha():
            pieces.append(u'­'.join(hyphenator.syllables(word)))
        else:
            pieces.append(word)
    return mark_safe(u' '.join(pieces))
def list_languages(): """ Return sorted list of (lang-code, lang-string) pairs, representing the available languages. When any language file is found, the default tuple ('en', 'English') will be included. Otherwise an empty list is returned. """ # Findst find all the MO files. # Each folder should also contain a dummy text file giving the language # Example: # <localedir>/nl/LC_MESSAGES/SABnzbd.mo # <localedir>/nl/LC_MESSAGES/Nederlands lst = [] for path in glob.glob(os.path.join(_LOCALEDIR, '*')): if os.path.isdir(path) and not path.endswith('en'): lngname = os.path.basename(path) lng = locale.normalize(lngname) # Example: 'pt_BR.ISO8859-1' lng_short = lng[:lng.find('_')] lng_full = lng[:lng.find('.')] # First try full language string, e.g. 'pt_BR' language = LanguageTable.get(lng_full, (lng_full, lng_full)) if language[0] == lng_full: # Full language string not defined: try short form, e.g. 'pt' language = LanguageTable.get(lng_short, (lng_short, lng_short)) lng = lng_short else: lng = lng_full language = language[1].decode('utf-8') lst.append((lng, language)) if lst: lst.append(('en', 'English')) return sorted(lst, key=operator.itemgetter(1)) else: return lst
def create(
        cls,
        locale=None,
        providers=None,
        generator=None,
        includes=None,
        **config):
    """Build a Generator populated with providers for *locale*.

    Raises AttributeError for a locale outside AVAILABLE_LOCALES.
    """
    if includes is None:
        includes = []
    # fix locale to package name
    locale = locale.replace('-', '_') if locale else DEFAULT_LOCALE
    locale = pylocale.normalize(locale).split('.')[0]
    if locale not in AVAILABLE_LOCALES:
        raise AttributeError(
            'Invalid configuration for faker locale `{0}`'.format(locale))
    config['locale'] = locale
    providers = providers or PROVIDERS
    # NOTE: extends the caller's list in place when one was passed,
    # matching the historical behavior.
    providers += includes
    faker = generator or Generator(**config)
    for prov_name in providers:
        if prov_name == 'faker.providers':
            continue
        prov_cls, lang_found = cls._get_provider_class(prov_name, locale)
        provider = prov_cls(faker)
        provider.__provider__ = prov_name
        provider.__lang__ = lang_found
        faker.add_provider(provider)
    return faker
def get_request_data(self, request):
    """Assemble url/locale/title metadata for *request*."""
    lang = request.LANGUAGE_CODE.replace('-', '_')
    return {
        'url': request.build_absolute_uri(),
        # Long form, e.g. 'en_US', without the encoding suffix.
        'locale': locale.normalize(lang).split('.')[0],
        'title': settings.DEFAULT_TITLE or get_current_site(request).name,
    }
def view_videos(request):
    """Gives an overview of the whole available video gallery"""
    try:
        # we try to set the current locale to something that is better
        newloc = locale.normalize(request.LANGUAGE_CODE.replace('-','_')+'.utf8')
        locale.setlocale(locale.LC_ALL, newloc)
    except locale.Error:
        # we simply ignore otherwise, and leave it be
        pass
    playlists = YouTubePlayList.objects.all()
    entries = []
    try:
        # Flatten all playlist entries into one list.
        for p in playlists:
            entries += p.sorted
    except socket.gaierror:
        pass #working offline?
    # Newest first.
    entries.sort(reverse=True)
    paginator = Paginator(entries, 4)
    # Make sure page request is an int. If not, deliver first page.
    try:
        page = int(request.GET.get('page', '1'))
    except ValueError:
        page = 1
    # If page request (9999) is out of range, deliver last page of results.
    try:
        now = paginator.page(page)
    except (EmptyPage, InvalidPage):
        now = paginator.page(paginator.num_pages)
    return render_to_response('youtube_gallery.html', {'objects': now,},
                              context_instance=RequestContext(request))
def view_agenda(request):
    """Gives an overview of the whole agenda"""
    try:
        # we try to set the current locale to something that is better
        newloc = locale.normalize(request.LANGUAGE_CODE.replace('-','_')+'.utf8')
        locale.setlocale(locale.LC_ALL, newloc)
    except locale.Error:
        # we simply ignore otherwise, and leave it be
        pass
    account = Calendar.objects.get(id=1)
    entries = []
    feed = None
    try:
        # Wrap each raw feed entry; keep the feed itself for the template.
        entries = [CalendarEntry(k) for k in account.feed.entry]
        feed = account.feed
    except socket.gaierror:
        pass #working offline?
    return render_to_response('agenda_entries.html', {
        'feed': feed,
        'entries': sorted(entries)
    }, context_instance=RequestContext(request))
def send_subscribers_email(comment):
    """Email every subscriber of *comment*'s entry about the new comment,
    localized to the entry's language, then restore the active language."""
    BLANGO_URL = getattr(settings, 'BLANGO_URL')
    BLANGO_TITLE = getattr(settings, 'BLANGO_TITLE')
    # Everyone subscribed to the entry, except the new comment itself.
    recipients = comment.entry.comment_set.filter(subscribed=True).exclude(pk=comment.pk)
    lang = get_language()
    # Switch translations to the entry's own language for the message body.
    newlang = normalize(comment.entry.language.iso639_1 + '.UTF-8')
    activate(newlang)
    subject = _('New comment on %s') % comment.entry.title
    from_email = 'noreply@%s' % hostname_from_uri(BLANGO_URL, http=False)
    for r in recipients:
        message = _('''
A new comment has been posted in %(entry)s.
You can read it at
%(uri)s

You're receiving this email because you subscribed to "%(entry)s" in %(blog_title)s.
If you no longer want to received any followups just visit
%(unsubscribe)s
''') % {
            'entry': comment.entry.title,
            'uri': comment.get_absolute_url(),
            'blog_title': BLANGO_TITLE,
            # Per-recipient unsubscribe link, keyed by id + email hash.
            'unsubscribe': BLANGO_URL + 'unsubscribe/%s/%s/' % \
                (r.id, sha1(r.author_email).hexdigest())
        }
        send_mail(subject, message, from_email, [r.author_email],
                  fail_silently=True)
    # Restore the language that was active before we switched.
    activate(lang)
def process_order_confirmed(self, request, order):
    """Render the Postfinance payment form for *order*.

    Short-circuits when the order has no remaining balance.
    """
    POSTFINANCE = settings.POSTFINANCE
    if not order.balance_remaining:
        return self.already_paid(order)
    logger.info('Processing order %s using Postfinance' % order)
    payment = self.create_pending_payment(order)
    # Reserve stock while the payment is in flight.
    self.create_transactions(order, _('payment process reservation'),
        type=StockTransaction.PAYMENT_PROCESS_RESERVATION,
        negative=True, payment=payment)
    form_params = {
        'orderID': 'Order-%d-%d' % (order.id, payment.id),
        # Amount is sent in cents (two decimals * 100).
        'amount': u'%s' % int(order.balance_remaining.quantize(Decimal('0.00'))*100),
        'currency': order.currency,
        'PSPID': POSTFINANCE['PSPID'],
        'mode': POSTFINANCE['LIVE'] and 'prod' or 'test',
    }
    # SHA-1 signature over the concatenated fields plus the shared secret.
    form_params['SHASign'] = sha1(u''.join((
        form_params['orderID'],
        form_params['amount'],
        form_params['currency'],
        form_params['PSPID'],
        POSTFINANCE['SHA1_IN'],
    ))).hexdigest()
    return self.shop.render(request, 'payment/postfinance_form.html', {
        'order': order,
        'HTTP_HOST': request.META.get('HTTP_HOST'),
        'form_params': form_params,
        # Long locale form, e.g. 'en_US', for the hosted payment page.
        'locale': locale.normalize(to_locale(get_language())).split('.')[0],
    })
def get_country(self, lang):
    "@TODO not politically correct"
    # Explicit region in the tag wins, e.g. 'en-us' -> 'us'.
    if '-' in lang.id:
        return lang.id.split('-')[1]
    # Otherwise derive the default country via locale normalization,
    # e.g. 'de' -> 'de_DE.ISO8859-1' -> 'de'.
    parts = locale.normalize(lang.id).split('_')
    if len(parts) > 1:
        return parts[1].split('.')[0].lower()
    return lang.id
def SetLanguage():
    """Set LC_ALL from the LANG environment variable, best effort.

    Retries with a '.UTF-8' suffix before giving up; failures are
    reported on stderr (Python 2 code).
    """
    import locale
    language = os.getenv('LANG')
    if not language:
        return
    language = language.split('.')[0]  # Split off ignored .encoding part if present
    orig_language = language
    try:
        locale.setlocale(locale.LC_ALL, language)
    except locale.Error, e:
        if sys.platform != 'win32':  # Don't try on Windows, it will probably not work
            # sys.stderr.write("Failed to set LC_ALL to %s (%s)\n" % (language, e))
            try:
                # Locale lang.encoding might be missing. Let's try
                # UTF-8 encoding before giving up as on Linux systems
                # lang.UTF-8 locales are more common than legacy
                # ISO-8859 ones.
                language = locale.normalize('%s.UTF-8' % (language))
                locale.setlocale(locale.LC_ALL, language)
            except locale.Error, e:
                # If we got so far, provided locale is not supported
                # on this system
                sys.stderr.write("Failed to set LC_ALL to %s (%s)\n" % (language, e))
                ### locale.getdefaultlocale() is probably related to gettext?
                # try:
                #     default_locale = locale.getdefaultlocale()
                # except:
                #     default_locale = None
                # if default_locale and default_locale[0]:
                #     language = default_locale[0]
                # else:
                # NOTE(review): `language` and `orig_language` appear unused
                # after this point — presumably leftovers; confirm.
                language = 'C'
def render(self, context, instance, placeholder):
    """Expose the plugin instance and Facebook settings to the template."""
    context["instance"] = instance
    context["long_language_code"] = locale.normalize(get_language()).split(".")[0]
    # Optional Facebook configuration; None when unset in settings.
    for key in ("FACEBOOK_APP_ID", "FACEBOOK_COLOR_SCHEME", "FACEBOOK_FONT"):
        context[key.lower()] = getattr(settings, key, None)
    return context
def detect_user_locale():
    """Best-effort detection of the user's (language, region) pair.

    Returns (None, None) when no locale can be determined.
    """
    import locale
    if renpy.windows:
        import ctypes
        windll = ctypes.windll.kernel32
        locale_name = locale.windows_locale.get(windll.GetUserDefaultUILanguage())
    elif renpy.android:
        from jnius import autoclass
        Locale = autoclass('java.util.Locale')
        locale_name = str(Locale.getDefault().getLanguage())
    elif renpy.ios:
        import pyobjus
        NSLocale = pyobjus.autoclass("NSLocale")
        languages = NSLocale.preferredLanguages()
        locale_name = languages.objectAtIndex_(0).UTF8String().decode("utf-8")
        # fix: str.replace returns a new string; the original discarded
        # the result, leaving 'en-US' style names unconverted.
        locale_name = locale_name.replace("-", "_")
    else:
        locale_name = locale.getdefaultlocale()
        if locale_name is not None:
            locale_name = locale_name[0]
    if locale_name is None:
        return None, None
    normalized = locale.normalize(locale_name)
    if normalized == locale_name:
        # normalize() did not recognize the name; use it for both parts.
        language = region = locale_name
    else:
        locale_name = normalized
        if '.' in locale_name:
            locale_name, _ = locale_name.split('.', 1)
        language, region = locale_name.lower().split("_")
    return language, region
def normalize_language(langcode):
    """Expand a code like 'en-us' to a (locale_name, encoding) pair.

    Returns (None, None) when the code is not recognized.
    """
    localename = normalize(langcode.replace('-', '_'))
    if '.' not in localename:
        # normalize() echoes back unknown codes without an encoding
        # suffix — treat that as "not a valid or recognized language".
        return None, None
    return localename.split(".")
def list_view(request, state, year=None, month=None, day=None, future=None):
    """Moderator listing of reservations in *state*, optionally filtered
    to a month (year+month), a single day (year+month+day) or all
    future dates (*future*)."""
    if not request.user.vvsuser.can_moderate:
        raise PermissionDenied
    # Dutch month/day names for the human-readable period strings.
    setlocale(LC_TIME, normalize('nl_NL'))
    qs = Reservation.objects.filter(state=state)
    if year is not None and month is not None:
        if day is None:
            # Whole month: find the last day by walking back from +31 days.
            firstDOM = datetime.date(int(year), int(month), 1)
            lastDOM = firstDOM + datetime.timedelta(days=31)
            while firstDOM.month != lastDOM.month:
                lastDOM -= datetime.timedelta(days=1)
            qs = qs.filter(date__range=(firstDOM, lastDOM))
            humanperiod = firstDOM.strftime(" in %B %Y")
        else:
            date = datetime.date(int(year), int(month), int(day))
            qs = qs.filter(date=date)
            humanperiod = date.strftime(" op %d %B %Y")
    elif future is not None:
        date = datetime.date.today()
        qs = qs.filter(date__gte=date)
        humanperiod = date.strftime(" vanaf %d %B %Y")
    else:
        humanperiod = ""
    reservations = list(qs.order_by('date'))
    humanstate = states_dict[state]
    return render_to_response('list.html', {'humanperiod': humanperiod,
                                            'humanstate': humanstate,
                                            'reservations': reservations,
                                            'states': states},
                              context_instance=RequestContext(request))
def localeNameToWindowsLCID(localeName): """Retreave the Windows locale identifier (LCID) for the given locale name @param localeName: a string of 2letterLanguage_2letterCountry or or just 2letterLanguage @type localeName: string @returns: a Windows LCID or L{LCID_NONE} if it could not be retrieved. @rtype: integer """ #Windows Vista is able to convert locale names to LCIDs func_LocaleNameToLCID=getattr(ctypes.windll.kernel32,'LocaleNameToLCID',None) if func_LocaleNameToLCID is not None: localeName=localeName.replace('_','-') LCID=func_LocaleNameToLCID(unicode(localeName),0) # #6259: In Windows 10, LOCALE_CUSTOM_UNSPECIFIED is returned for any locale name unknown to Windows. # This was observed for Aragonese ("an"). # See https://msdn.microsoft.com/en-us/library/system.globalization.cultureinfo.lcid(v=vs.110).aspx. if LCID==LOCALE_CUSTOM_UNSPECIFIED: LCID=LCID_NONE else: #Windows doesn't have this functionality, manually search Python's windows_locale dictionary for the LCID localeName=locale.normalize(localeName) if '.' in localeName: localeName=localeName.split('.')[0] LCList=[x[0] for x in locale.windows_locale.iteritems() if x[1]==localeName] if len(LCList)>0: LCID=LCList[0] else: LCID=LCID_NONE return LCID
def social_get_facebook_locale(locale):
    """ Normalize the locale string and split the value needed for the api url """
    # Default to US English when no locale was supplied.
    return 'en_US' if locale is None else normalize(locale).split('.')[0]
def guess_locale_from_lang_posix(lang):
    # compatibility v6.0.4
    if is_valid_locale(str(lang)):
        return str(lang)
    # this works in Travis when locale support set by Travis suggestion:
    # normalized base language plus a '.utf8' suffix.
    return str(locale.normalize(lang).split('.')[0] + '.utf8')
def set_locale(request):
    """Best effort: point LC_ALL at the request's language; ignore failure."""
    try:
        # we try to set the current locale to something that is better
        tag = request.LANGUAGE_CODE.replace('-', '_') + '.utf8'
        locale.setlocale(locale.LC_ALL, locale.normalize(tag))
    except locale.Error:
        # we simply ignore otherwise, and leave it be
        pass
def process_order_confirmed(self, request, order):
    """Render the Ogone payment form for *order*.

    Short-circuits when the order has no remaining balance.
    """
    OGONE = settings.OGONE
    if not order.balance_remaining:
        return self.already_paid(order)
    logger.info('Processing order %s using Ogone' % order)
    payment = self.create_pending_payment(order)
    if plata.settings.PLATA_STOCK_TRACKING:
        # Reserve stock while the payment is pending.
        self.create_transactions(
            order, _('payment process reservation'),
            type=StockTransaction.PAYMENT_PROCESS_RESERVATION,
            negative=True, payment=payment)
    # params that will be hashed
    form_params = {
        'PSPID': OGONE['PSPID'],
        'orderID': 'Order-%d-%d' % (order.id, payment.id),
        # Amount is sent in cents (two decimals * 100).
        'amount': u'%s' % int(order.balance_remaining.quantize(Decimal('0.00'))*100),
        'currency': order.currency,
        'language': locale.normalize(to_locale(get_language())).split('.')[0],
        'CN': u'%s %s' % (order.billing_first_name, order.billing_last_name),
        'EMAIL': order.email,
        'ownerZIP': order.billing_zip_code,
        'owneraddress': order.billing_address,
        'ownertown': order.billing_city,
        'accepturl': u'http://%s%s' % (
            request.META.get('HTTP_HOST'), reverse('plata_order_success')),
        'declineurl': u'http://%s%s' % (
            request.META.get('HTTP_HOST'), reverse('plata_order_payment_failure')),
        'exceptionurl': u'http://%s%s' % (
            request.META.get('HTTP_HOST'), reverse('plata_order_payment_failure')),
        'cancelurl': u'http://%s%s' % (
            request.META.get('HTTP_HOST'), reverse('plata_order_payment_failure')),
    }
    # create hash
    # "KEY=value<secret>" strings, sorted and concatenated, then SHA-1.
    value_strings = [u'{0}={1}{2}'.format(key.upper(), value, OGONE['SHA1_IN'])
                     for key, value in form_params.items()]
    hash_string = u''.join(sorted(value_strings))
    encoded_hash_string = sha1(hash_string.encode('utf-8')).hexdigest()
    # add hash and additional params
    form_params.update({
        'SHASign': encoded_hash_string.upper(),
        'mode': OGONE['LIVE'] and 'prod' or 'test',
    })
    return self.shop.render(request, 'payment/ogone_form.html', {
        'order': order,
        'HTTP_HOST': request.META.get('HTTP_HOST'),
        'form_params': form_params,
        'locale': form_params['language'],
    })
def get_first_lang():
    """Get the first lang of Accept-Language Header. """
    request_lang = request.headers.get('Accept-Language').split(',')
    if not request_lang:
        # Unreachable in practice (split never yields []); kept for parity.
        return False
    return locale.normalize(request_lang[0]).split('.')[0]
def full_locale(langcode=None):
    """Return a full locale string for *langcode* (default: current language)."""
    if langcode is None:
        langcode = getlangcode()
    # Hard-coded regional defaults for the two primary languages
    # (US English, Mexican Spanish).
    overrides = {'en': 'en_US.UTF-8', 'es': 'es_MX.UTF-8'}
    try:
        return overrides[langcode]
    except KeyError:
        return locale.normalize(langcode)
def _expand_lang(locale):
    """Return candidate locale names from most to least specific
    (gettext-style fallback expansion of language[_territory][.codeset][@modifier])."""
    from locale import normalize
    locale = normalize(locale)
    COMPONENT_CODESET = 1 << 0
    COMPONENT_TERRITORY = 1 << 1
    COMPONENT_MODIFIER = 1 << 2
    mask = 0
    # Peel off the '@modifier', '.codeset' and '_territory' suffixes in
    # turn, recording which components are present in `mask`.
    pos = locale.find('@')
    if pos >= 0:
        locale, modifier = locale[:pos], locale[pos:]
        mask |= COMPONENT_MODIFIER
    else:
        modifier = ''
    pos = locale.find('.')
    if pos >= 0:
        locale, codeset = locale[:pos], locale[pos:]
        mask |= COMPONENT_CODESET
    else:
        codeset = ''
    pos = locale.find('_')
    if pos >= 0:
        locale, territory = locale[:pos], locale[pos:]
        mask |= COMPONENT_TERRITORY
    else:
        territory = ''
    language = locale
    # Emit every combination of the present components, most specific last,
    # then reverse so the most specific candidate comes first.
    ret = []
    for i in range(mask + 1):
        if not (i & ~mask):  # all components for this combo exist
            val = language
            if i & COMPONENT_TERRITORY:
                val += territory
            if i & COMPONENT_CODESET:
                val += codeset
            if i & COMPONENT_MODIFIER:
                val += modifier
            ret.append(val)
    ret.reverse()
    return ret
def create(
        cls,
        locale=None,
        providers=None,
        generator=None,
        includes=None,
        # Should we use weightings (more realistic) or weight every element equally (faster)?
        # By default, use weightings for backwards compatibility & realism
        use_weighting=True,
        **config):
    """Build a Generator with locale-appropriate providers.

    Raises AttributeError for a locale outside AVAILABLE_LOCALES.
    """
    if includes is None:
        includes = []
    # fix locale to package name
    locale = locale.replace('-', '_') if locale else DEFAULT_LOCALE
    locale = pylocale.normalize(locale).split('.')[0]
    if locale not in AVAILABLE_LOCALES:
        raise AttributeError(f'Invalid configuration for faker locale `{locale}`')
    config['locale'] = locale
    config['use_weighting'] = use_weighting
    providers = providers or PROVIDERS
    # NOTE: extends the caller's list in place when one was passed,
    # matching the historical behavior.
    providers += includes
    faker = generator or Generator(**config)
    for prov_name in providers:
        if prov_name == 'faker.providers':
            continue
        prov_cls, lang_found = cls._get_provider_class(prov_name, locale)
        provider = prov_cls(faker)
        provider.__use_weighting__ = use_weighting
        provider.__provider__ = prov_name
        provider.__lang__ = lang_found
        faker.add_provider(provider)
    return faker
def _valid_locales(locales, normalize):
    """
    Return a list of normalized locales that do not throw an ``Exception``
    when set.

    Parameters
    ----------
    locales : str
        A string where each locale is separated by a newline.
    normalize : bool
        Whether to call ``locale.normalize`` on each locale.

    Returns
    -------
    valid_locales : list
        A list of valid locales.
    """
    candidates = (
        locale.normalize(raw.strip()) if normalize else raw.strip()
        for raw in locales
    )
    return [loc for loc in candidates if can_set_locale(loc)]
def srfdate(self, d_preset=None):
    """
    Display date in SR format

    :param d_preset: Preset date format
    :return: date string
    """
    strd = ''
    # Start from the system default time locale.
    try:
        locale.setlocale(locale.LC_TIME, '')
    except Exception:
        pass
    # Prefer the GUI language's locale; fall back to en_US; if both fail,
    # stop trying locales on this instance.
    try:
        if self.has_locale:
            locale.setlocale(
                locale.LC_TIME,
                locale.normalize(sickrage.app.config.gui_lang))
    except Exception:
        try:
            if self.has_locale:
                locale.setlocale(locale.LC_TIME, self.en_US_norm)
        except Exception:
            self.has_locale = False
    try:
        if d_preset is not None:
            strd = self.dt.strftime(d_preset)
        else:
            strd = self.dt.strftime(sickrage.app.config.date_preset)
    finally:
        # Always restore the default time locale afterwards.
        try:
            locale.setlocale(locale.LC_TIME, '')
        except Exception:
            pass
    return encoding.to_unicode(strd)
def test_write_only_selected(self):
    """Test that only the selected files are written"""
    settings = read_settings(path=None, override={
        'PATH': INPUT_PATH,
        'OUTPUT_PATH': self.temp_path,
        'CACHE_PATH': self.temp_cache,
        # Restrict output to exactly these two files.
        'WRITE_SELECTED': [
            os.path.join(self.temp_path, 'oh-yeah.html'),
            os.path.join(self.temp_path, 'categories.html'),
        ],
        'LOCALE': locale.normalize('en_US'),
    })
    pelican = Pelican(settings=settings)
    # Lower the log level so the "Writing ..." INFO messages are captured.
    logger = logging.getLogger()
    orig_level = logger.getEffectiveLevel()
    logger.setLevel(logging.INFO)
    mute(True)(pelican.run)()
    logger.setLevel(orig_level)
    # One "Writing" message per selected file.
    self.assertLogCountEqual(
        count=2,
        msg="Writing .*",
        level=logging.INFO)
def localeNameToWindowsLCID(localeName): """Retreave the Windows locale identifier (LCID) for the given locale name @param localeName: a string of 2letterLanguage_2letterCountry or or just 2letterLanguage @type localeName: string @returns: a Windows LCID @rtype: integer """ #Windows Vista is able to convert locale names to LCIDs func_LocaleNameToLCID=getattr(ctypes.windll.kernel32,'LocaleNameToLCID',None) if func_LocaleNameToLCID is not None: localeName=localeName.replace('_','-') LCID=func_LocaleNameToLCID(unicode(localeName),0) else: #Windows doesn't have this functionality, manually search Python's windows_locale dictionary for the LCID localeName=locale.normalize(localeName) if '.' in localeName: localeName=localeName.split('.')[0] LCList=[x[0] for x in locale.windows_locale.iteritems() if x[1]==localeName] if len(LCList)>0: LCID=LCList[0] else: LCID=0 return LCID
def create(cls, locale=None, providers=None, generator=None, **config):
    """Instantiate a Generator with locale-appropriate providers.

    Raises AttributeError for a locale outside AVAILABLE_LOCALES.
    """
    # fix locale to package name
    locale = pylocale.normalize(
        locale.replace('-', '_') if locale else DEFAULT_LOCALE).split('.')[0]
    if locale not in AVAILABLE_LOCALES:
        raise AttributeError(
            'Invalid configuration for faker locale "{0}"'.format(locale))
    faker = generator or Generator(**config)
    faker.add_provider(providers_mod.BaseProvider)
    for prov_name in (providers or DEFAULT_PROVIDERS):
        prov_cls, lang_found = cls._get_provider_class(prov_name, locale)
        provider = prov_cls(faker)
        provider.__provider__ = prov_name
        provider.__lang__ = lang_found
        faker.add_provider(provider)
    return faker
def setUp(self, override=None):
    """Create a temp output dir and run Pelican with the pdf plugin."""
    self.temp_path = mkdtemp(prefix='pelicantests.')
    settings = {
        'PATH': os.path.join(os.path.dirname(CUR_DIR), '..', 'test_data', 'content'),
        'OUTPUT_PATH': self.temp_path,
        'PLUGINS': [pdf],
        'LOCALE': locale.normalize('en_US'),
    }
    if override:
        settings.update(override)
    self.settings = read_settings(override=settings)
    pelican = Pelican(settings=self.settings)
    try:
        pelican.run()
    except ValueError:
        # Known limitation: the pdf generator cannot resolve relative links.
        logging.warn('Relative links in the form of ' +
                     '|filename|images/test.png are not yet handled by ' +
                     ' the pdf generator')
        pass
def _valid_locales(locales, normalize):
    """Return a list of normalized locales that do not throw an ``Exception``
    when set.

    Parameters
    ----------
    locales : str
        A string where each locale is separated by a newline.
    normalize : bool
        Whether to call ``locale.normalize`` on each locale.

    Returns
    -------
    valid_locales : list
        A list of valid locales.
    """
    def _prepare(raw):
        stripped = raw.strip()
        return locale.normalize(stripped) if normalize else stripped
    return list(filter(_can_set_locale, map(_prepare, locales)))
def detect_user_locale():
    """Best-effort detection of the user's (language, region) pair.

    Returns (None, None) when no locale can be determined.
    """
    import locale
    if renpy.windows:
        import ctypes
        windll = ctypes.windll.kernel32  # type: ignore
        locale_name = locale.windows_locale.get(
            windll.GetUserDefaultUILanguage())
    elif renpy.android:
        from jnius import autoclass  # type: ignore
        Locale = autoclass('java.util.Locale')
        locale_name = str(Locale.getDefault().getLanguage())
    elif renpy.ios:
        import pyobjus  # type: ignore
        NSLocale = pyobjus.autoclass("NSLocale")
        languages = NSLocale.preferredLanguages()
        locale_name = languages.objectAtIndex_(0).UTF8String()
        if isinstance(locale_name, bytes):
            locale_name = locale_name.decode("utf-8")
        # fix: the original assigned to `local_name` (typo), so the
        # '-' -> '_' conversion was silently discarded.
        locale_name = locale_name.replace("-", "_")
    else:
        locale_name = locale.getdefaultlocale()
        if locale_name is not None:
            locale_name = locale_name[0]
    if locale_name is None:
        return None, None
    normalized = locale.normalize(locale_name)
    if normalized == locale_name:
        # normalize() did not recognize the name; use it for both parts.
        language = region = locale_name
    else:
        locale_name = normalized
        if '.' in locale_name:
            locale_name, _ = locale_name.split('.', 1)
        language, region = locale_name.lower().split("_")
    return language, region
def home(request):
    """Moderator dashboard: per-state reservation counters plus labels
    for the current and next month. Non-moderators get the month view."""
    if not request.user.is_authenticated(
    ) or not request.user.vvsuser.can_moderate:
        return month_view(request)
    today = datetime.date.today()
    # Dutch month names for the human-readable labels.
    setlocale(LC_TIME, normalize('nl_NL'))
    # Walk forward day by day until we cross into the next month.
    nextMonth = today
    while nextMonth.month == today.month:
        nextMonth += datetime.timedelta(days=1)
    months = []
    for dt in [today, nextMonth]:
        months.append(
            dict(split_date(dt, include_day=False),
                 human=dt.strftime("%B %Y").capitalize()))
    counters = {}
    for i in ('pending', 'approved', 'needsigning', 'cancel_request'):
        counters[i] = Reservation.objects.filter(state=i).count()
    return render_to_response('moderator_home.html', {
        'months': months,
        'counters': counters
    }, context_instance=RequestContext(request))
def setlocale(category, locale=None):
    """setlocale wrapper that retries Windows-style spellings of *locale*
    (numeric code pages, English language/country names) before giving up."""
    if locale and not isinstance(locale, (_locale._str, _locale._unicode)):
        # Tuple form: build a locale name the way the stdlib does.
        locale = _locale.normalize(_locale._build_localename(locale))
    res = try_locale(category, locale)
    if res is False:
        # Split off any '.codepage' suffix for the retries below.
        if '.' in locale:
            locale, code_page = locale.split('.')
            code_page = '.' + code_page
        else:
            code_page = ''
        if code_page:
            # Retry with a bare numeric code page ('cp1252' -> '1252').
            locale = locale + code_page.replace('cp', '').replace(
                'windows-', '')
            res = try_locale(category, locale)
            locale = locale.rsplit('.', 1)[0]
        if res is False:
            # Retry using Windows' English names, e.g. 'de_DE' ->
            # 'German_Germany'.
            iso_code = locale.replace('_', '-')
            lang_name, country_name = Locale.GetEnglishNames(iso_code)
            locale = lang_name + '_' + country_name + code_page
            res = try_locale(category, locale)
            if res is False:
                code_page = code_page.replace('cp', '').replace('windows-', '')
                locale = lang_name + '_' + country_name + code_page
                res = try_locale(category, locale)
                if res is False:
                    # NOTE(review): bare `raise` with no active exception
                    # raises "RuntimeError: No active exception to re-raise"
                    # — presumably a locale.Error was intended; confirm.
                    raise
    return res
def test_turkish_locale(self):
    """Template dates must honour a non-C locale (Turkish)."""
    settings = read_settings(
        override = {'LOCALE': locale.normalize('tr_TR.UTF-8'),
                    'TEMPLATE_PAGES': {'template/source.html': 'generated/file.html'}})
    generator = TemplatePagesGenerator(
        {'date': self.date}, settings, self.temp_content, '', self.temp_output)
    generator.env.filters.update({'strftime': utils.DateFormatter()})
    writer = Writer(self.temp_output, settings=settings)
    generator.generate_output(writer)
    output_path = os.path.join(self.temp_output, 'generated', 'file.html')
    # output file has been generated
    self.assertTrue(os.path.exists(output_path))
    # output content is correct
    with utils.pelican_open(output_path) as output_file:
        self.assertEqual(output_file,
                         utils.strftime(self.date, 'date = %A, %d %B %Y'))
def SetLanguage():
    """Set LC_ALL from the LANG environment variable, best effort.

    Retries with a '.UTF-8' suffix before giving up; failures are
    reported on stderr (Python 2 code).
    """
    import locale
    language = os.getenv('LANG')
    if not language:
        return
    language = language.split('.')[
        0]  # Split off ignored .encoding part if present
    orig_language = language
    try:
        locale.setlocale(locale.LC_ALL, language)
    except locale.Error, e:
        if sys.platform != 'win32':  # Don't try on Windows, it will probably not work
            # sys.stderr.write("Failed to set LC_ALL to %s (%s)\n" % (language, e))
            try:
                # Locale lang.encoding might be missing. Let's try
                # UTF-8 encoding before giving up as on Linux systems
                # lang.UTF-8 locales are more common than legacy
                # ISO-8859 ones.
                language = locale.normalize('%s.UTF-8' % (language))
                locale.setlocale(locale.LC_ALL, language)
            except locale.Error, e:
                # If we got so far, provided locale is not supported
                # on this system
                sys.stderr.write("Failed to set LC_ALL to %s (%s)\n" % (language, e))
                ### locale.getdefaultlocale() is probably related to gettext?
                # try:
                #     default_locale = locale.getdefaultlocale()
                # except:
                #     default_locale = None
                # if default_locale and default_locale[0]:
                #     language = default_locale[0]
                # else:
                # NOTE(review): `language` and `orig_language` appear unused
                # after this point — presumably leftovers; confirm.
                language = 'C'
def process_message(self, original_message):
    """Translate a raw twinl/twitter message dict into the normalized
    schema; on any missing key, print the message and return None."""
    try:
        posted = datetime.fromtimestamp(
            int(original_message['timestamp_ms']) / 1000)
        language = locale.normalize(
            '{}.utf-8'.format(original_message['lang']))
        return {
            'message': original_message['text'],
            'author': original_message['user']['name'],
            'metadata': {
                'date': posted,
                'url': original_message['id'],
                'type': 'post',
                'source': 'twitter',
                'source_import': 'twinl',
                'lang': language,
            },
        }
    except KeyError:
        print(original_message)
def get_codes(code):
    # Determine language and country from 2-5 letters language codes.
    if len(code) == 2:
        lang = code.lower()
        if code in DEFAULT_COUNTRY4LANG:
            # We want to be able to override the default country
            # from language found using the locale.normalize().
            # E.g. we might want default country to be 'uk' for language 'en'
            return (lang, DEFAULT_COUNTRY4LANG[lang])
        # If you pass normalize() a 2 letter code like 'el' it returns
        # a normalised locale name like 'el_GR.ISO8859-7', from where you
        # can get the default country code
        return locale.normalize(code)[:5].lower().split("_")
    if code == 'sr-latn':
        # This is a special case which didn't seem to be recognized by the
        # locale module
        return ['sr', 'rs']
    if len(code) == 5:
        return code.lower().split("-")
    raise ImproperlyConfigured(
        "Invalid language code in settings.LANGUAGES: %s" % code)
def nice_date(date, lang):
    """Return *date* formatted as 'Weekday D Month YYYY' in *lang*'s locale."""
    locale.setlocale(locale.LC_ALL, locale.normalize(lang))
    formatted = '{0:%A} {1} {0:%B} {0:%Y}'.format(date, date.day)
    locale.setlocale(locale.LC_ALL, 'C')  # reset
    return formatted
def __init_first_instance(self):
    """Initialize the primary locale from whatever might be available.

    We only do this once, and the resulting GrampsLocale is returned
    by default.  Sets up logging, performs platform-specific locale
    detection, and reflects the chosen language back into the
    environment (LANG/LANGUAGE) for Gtk/GtkBuilder to pick up.
    """
    global _hdlr
    _hdlr = logging.StreamHandler()
    _hdlr.setFormatter(
        logging.Formatter(fmt="%(name)s.%(levelname)s: %(message)s"))
    LOG.addHandler(_hdlr)

    # Now that we have a logger set up we can issue the icu error if needed.
    if not HAVE_ICU:
        LOG.warning(_icu_err)

    # Even the first instance can be overridden by passing lang
    # and languages to the constructor. If it isn't (which is the
    # expected behavior), do platform-specific setup:
    if not (self.lang and self.language):
        if sys.platform == 'darwin':
            from . import maclocale
            maclocale.mac_setup_localization(self)
        elif sys.platform == 'win32':
            self._win_init_environment()
        else:
            self._init_from_environment()
    else:
        # Explicit lang given: use it for every locale category.
        self.numeric = self.currency = self.calendar = self.collation = self.lang

    # Fall back to U.S. English if platform setup produced nothing.
    if not self.lang:
        self.lang = 'en_US.UTF-8'
    if not self.language:
        # NOTE(review): appending to a falsy self.language assumes it is an
        # empty list here (never None) -- confirm against the constructor.
        self.language.append('en')
    if not self.localedir and not self.lang.startswith('en'):
        LOG.warning(
            "No translations for %s were found, setting localization to U.S. English",
            self.localedomain)
        self.lang = 'en_US.UTF-8'
        self.language = ['en']

    # Next, we need to know what is the encoding from the native
    # environment. This is used by python standard library funcions which
    # localize their output, e.g. time.strftime(). NB: encoding is a class variable.
    if not self.encoding:
        self.encoding = (locale.getpreferredencoding()
                         or sys.getdefaultencoding())
    LOG.debug("Setting encoding to %s", self.encoding)

    # Make sure that self.lang and self.language are reflected
    # back into the environment for Gtk to use when its
    # initialized. If self.lang isn't 'C', make sure that it has a
    # 'UTF-8' suffix, because that's all that GtkBuilder can
    # digest.
    # Gtk+ has an 'en' po, but we don't. This is worked-around for
    # our GrampsTranslation class but that isn't used to retrieve
    # translations in GtkBuilder (glade), a direct call to libintl
    # (gettext) is. If 'en' is in the translation list it gets
    # skipped in favor of the next language, which can cause
    # inappropriate translations of strings in glade/ui files. To
    # prevent this, if 'en' is in self.language it's the last
    # entry:
    if 'en' in self.language:
        self.language = self.language[:self.language.index('en') + 1]

    # Linux note: You'll get unsupported locale errors from Gtk
    # and untranslated strings if the requisite UTF-8 locale isn't
    # installed. This is particularly a problem on Debian and
    # Debian-derived distributions which by default don't install
    # a lot of locales.
    lang = locale.normalize(self.language[0] if self.language[0] else 'C')
    check_lang = lang.split('.')
    if not check_lang[0] in ('C', 'en'):
        # Force a UTF-8 codeset suffix if it's missing or lower-cased.
        if len(check_lang) < 2 or check_lang[1] not in ("utf-8", "UTF-8"):
            lang = '.'.join((check_lang[0], 'UTF-8'))
        os.environ["LANG"] = lang
    # We need to convert 'en' and 'en_US' to 'C' to avoid confusing
    # GtkBuilder when it's retrieving strings from our Glade files
    # since we have neither an en.po nor an en_US.po.
    os.environ["LANGUAGE"] = ':'.join(self.language)

    # GtkBuilder uses GLib's g_dgettext wrapper, which oddly is bound
    # with locale instead of gettext. Win32 doesn't support bindtextdomain.
    if self.localedir:
        if not sys.platform == 'win32':
            locale.bindtextdomain(self.localedomain, self.localedir)
        else:
            self._win_bindtextdomain(self.localedomain.encode('utf-8'),
                                     self.localedir.encode('utf-8'))
def main():
    """Command-line entry point: analyze an OpenAPC CSV file and enrich it.

    Parses arguments, optionally sets a locale and input encoding, detects
    the CSV dialect/header, maps columns (by header name, explicit CLI
    indexes, or heuristics), and finally runs the row-by-row metadata
    enrichment, writing one out_<type>.csv per record type.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("csv_file", help=ARG_HELP_STRINGS["csv_file"])
    parser.add_argument("-O", "--offsetting_mode",
                        help=ARG_HELP_STRINGS["offsetting"])
    parser.add_argument("-b", "--bypass-cert-verification", action="store_true",
                        help=ARG_HELP_STRINGS["bypass"])
    parser.add_argument("-e", "--encoding", help=ARG_HELP_STRINGS["encoding"])
    parser.add_argument("-f", "--force", action="store_true",
                        help=ARG_HELP_STRINGS["force"])
    parser.add_argument("-i", "--ignore-header", action="store_true",
                        help=ARG_HELP_STRINGS["ignore_header"])
    parser.add_argument("-j", "--force-header", action="store_true",
                        help=ARG_HELP_STRINGS["force_header"])
    parser.add_argument("-l", "--locale", help=ARG_HELP_STRINGS["locale"])
    parser.add_argument("-a", "--add-unknown-columns", action="store_true",
                        help=ARG_HELP_STRINGS["unknown_columns"])
    parser.add_argument("-d", "--dialect", choices=["excel", "excel-tab", "unix"],
                        help=ARG_HELP_STRINGS["dialect"])
    parser.add_argument("-v", "--verbose", action="store_true",
                        help=ARG_HELP_STRINGS["verbose"])
    parser.add_argument("-o", "--overwrite", action="store_true",
                        help=ARG_HELP_STRINGS["overwrite"])
    parser.add_argument("-u", "--update", action="store_true",
                        help=ARG_HELP_STRINGS["update"])
    parser.add_argument("-r", "--round_monetary", action="store_true",
                        help=ARG_HELP_STRINGS["round_monetary"])
    parser.add_argument("--no-crossref", action="store_true",
                        help=ARG_HELP_STRINGS["no_crossref"])
    parser.add_argument("--no-pubmed", action="store_true",
                        help=ARG_HELP_STRINGS["no_pubmed"])
    parser.add_argument("--no-doaj", action="store_true",
                        help=ARG_HELP_STRINGS["no_doaj"])
    parser.add_argument("-institution", "--institution_column", type=int,
                        help=ARG_HELP_STRINGS["institution"])
    parser.add_argument("-period", "--period_column", type=int,
                        help=ARG_HELP_STRINGS["period"])
    parser.add_argument("-doi", "--doi_column", type=int,
                        help=ARG_HELP_STRINGS["doi"])
    parser.add_argument("-euro", "--euro_column", type=int,
                        help=ARG_HELP_STRINGS["euro"])
    parser.add_argument("-is_hybrid", "--is_hybrid_column", type=int,
                        help=ARG_HELP_STRINGS["is_hybrid"])
    parser.add_argument("-publisher", "--publisher_column", type=int,
                        help=ARG_HELP_STRINGS["publisher"])
    parser.add_argument("-journal_full_title", "--journal_full_title_column",
                        type=int, help=ARG_HELP_STRINGS["journal_full_title"])
    parser.add_argument("-book_title", "--book_title_column", type=int,
                        help=ARG_HELP_STRINGS["book_title"])
    parser.add_argument("-issn", "--issn_column", type=int,
                        help=ARG_HELP_STRINGS["issn"])
    parser.add_argument("-isbn", "--isbn_column", type=int,
                        help=ARG_HELP_STRINGS["isbn"])
    parser.add_argument("-backlist_oa", "--backlist_oa_column", type=int,
                        help=ARG_HELP_STRINGS["backlist_oa"])
    parser.add_argument("-additional_isbns", "--additional_isbn_columns",
                        type=int, nargs='+',
                        help=ARG_HELP_STRINGS["additional_isbns"])
    parser.add_argument("-url", "--url_column", type=int,
                        help=ARG_HELP_STRINGS["url"])
    parser.add_argument("-start", type=int, help=ARG_HELP_STRINGS["start"])
    parser.add_argument("-end", type=int, help=ARG_HELP_STRINGS["end"])

    args = parser.parse_args()

    # Colored console logging plus a buffering handler that replays all
    # errors at the very end of the run.
    handler = logging.StreamHandler(sys.stderr)
    handler.setFormatter(oat.ANSIColorFormatter())
    bufferedHandler = oat.BufferedErrorHandler(handler)
    bufferedHandler.setFormatter(oat.ANSIColorFormatter())
    logging.root.addHandler(handler)
    logging.root.addHandler(bufferedHandler)
    logging.root.setLevel(logging.INFO)

    if args.locale:
        norm = locale.normalize(args.locale)
        if norm != args.locale:
            msg = "locale '{}' not found, normalised to '{}'".format(
                args.locale, norm)
            oat.print_y(msg)
        try:
            loc = locale.setlocale(locale.LC_ALL, norm)
            oat.print_g("Using locale " + loc)
        except locale.Error as loce:
            # BUGFIX: exceptions have no ".message" attribute on Python 3,
            # which turned this error path into an AttributeError.
            msg = "Setting locale to {} failed: {}".format(norm, str(loce))
            oat.print_r(msg)
            sys.exit()

    enc = None  # CSV file encoding
    if args.encoding:
        try:
            codec = codecs.lookup(args.encoding)
            msg = ("Encoding '{}' found in Python's codec collection " +
                   "as '{}'").format(args.encoding, codec.name)
            oat.print_g(msg)
            enc = args.encoding
        except LookupError:
            msg = ("Error: '" + args.encoding + "' not found in Python's " +
                   "codec collection. Either look for a valid name here " +
                   "(https://docs.python.org/2/library/codecs.html#standard-" +
                   "encodings) or omit this argument to enable automated " +
                   "guessing.")
            oat.print_r(msg)
            sys.exit()

    result = oat.analyze_csv_file(args.csv_file, enc=enc)
    if result["success"]:
        csv_analysis = result["data"]
        print(csv_analysis)
    else:
        print(result["error_msg"])
        sys.exit()

    if args.dialect:
        dialect = args.dialect
        oat.print_g('Dialect sniffing results ignored, using built-in CSV dialect "' + dialect + '"')
    else:
        dialect = csv_analysis.dialect

    if enc is None:
        enc = csv_analysis.enc
    has_header = csv_analysis.has_header or args.force_header

    if enc is None:
        print("Error: No encoding given for CSV file and automated " +
              "detection failed. Please set the encoding manually via the " +
              "--encoding argument")
        sys.exit()

    csv_file = open(args.csv_file, "r", encoding=enc)
    reader = csv.reader(csv_file, dialect=dialect)
    first_row = next(reader)
    num_columns = len(first_row)
    print("\nCSV file has {} columns.".format(num_columns))

    csv_file.seek(0)
    reader = csv.reader(csv_file, dialect=dialect)

    if args.update and args.overwrite:
        oat.print_r("Error: Either use the -u or the -o option, not both.")
        sys.exit()

    if args.overwrite:
        for column in OVERWRITE_STRATEGY.keys():
            OVERWRITE_STRATEGY[column] = CSVColumn.OW_ALWAYS
    elif not args.update:
        for column in OVERWRITE_STRATEGY.keys():
            OVERWRITE_STRATEGY[column] = CSVColumn.OW_ASK

    additional_isbn_columns = []
    if args.additional_isbn_columns:
        for index in args.additional_isbn_columns:
            if index > num_columns:
                msg = "Error: Additional ISBN column index {} exceeds number of columns ({})."
                oat.print_r(msg.format(index, num_columns))
                sys.exit()
            else:
                additional_isbn_columns.append(index)

    # Requirement levels differ between article and book records; a column
    # index of None means "not located yet".
    column_map = {
        "institution": CSVColumn("institution",
                                 {"articles": CSVColumn.MANDATORY, "books": CSVColumn.MANDATORY},
                                 args.institution_column,
                                 overwrite=OVERWRITE_STRATEGY["institution"]),
        "period": CSVColumn("period",
                            {"articles": CSVColumn.MANDATORY, "books": CSVColumn.MANDATORY},
                            args.period_column,
                            overwrite=OVERWRITE_STRATEGY["period"]),
        "euro": CSVColumn("euro",
                          {"articles": CSVColumn.MANDATORY, "books": CSVColumn.MANDATORY},
                          args.euro_column,
                          overwrite=OVERWRITE_STRATEGY["euro"]),
        "doi": CSVColumn("doi",
                         {"articles": CSVColumn.MANDATORY, "books": CSVColumn.MANDATORY},
                         args.doi_column,
                         overwrite=OVERWRITE_STRATEGY["doi"]),
        "is_hybrid": CSVColumn("is_hybrid",
                               {"articles": CSVColumn.MANDATORY, "books": CSVColumn.NONE},
                               args.is_hybrid_column,
                               overwrite=OVERWRITE_STRATEGY["is_hybrid"]),
        "publisher": CSVColumn("publisher",
                               {"articles": CSVColumn.BACKUP, "books": CSVColumn.NONE},
                               args.publisher_column,
                               overwrite=OVERWRITE_STRATEGY["publisher"]),
        "journal_full_title": CSVColumn("journal_full_title",
                                        {"articles": CSVColumn.BACKUP, "books": CSVColumn.NONE},
                                        args.journal_full_title_column,
                                        overwrite=OVERWRITE_STRATEGY["journal_full_title"]),
        "issn": CSVColumn("issn",
                          {"articles": CSVColumn.BACKUP, "books": CSVColumn.NONE},
                          args.issn_column,
                          overwrite=OVERWRITE_STRATEGY["issn"]),
        "issn_print": CSVColumn("issn_print",
                                {"articles": CSVColumn.NONE, "books": CSVColumn.NONE},
                                None, overwrite=OVERWRITE_STRATEGY["issn_print"]),
        "issn_electronic": CSVColumn("issn_electronic",
                                     {"articles": CSVColumn.NONE, "books": CSVColumn.NONE},
                                     None, overwrite=OVERWRITE_STRATEGY["issn_electronic"]),
        "issn_l": CSVColumn("issn_l",
                            {"articles": CSVColumn.NONE, "books": CSVColumn.NONE},
                            None, overwrite=OVERWRITE_STRATEGY["issn_l"]),
        "license_ref": CSVColumn("license_ref",
                                 {"articles": CSVColumn.NONE, "books": CSVColumn.NONE},
                                 None, overwrite=OVERWRITE_STRATEGY["license_ref"]),
        "indexed_in_crossref": CSVColumn("indexed_in_crossref",
                                         {"articles": CSVColumn.NONE, "books": CSVColumn.NONE},
                                         None, overwrite=OVERWRITE_STRATEGY["indexed_in_crossref"]),
        "pmid": CSVColumn("pmid",
                          {"articles": CSVColumn.NONE, "books": CSVColumn.NONE},
                          None, overwrite=OVERWRITE_STRATEGY["pmid"]),
        "pmcid": CSVColumn("pmcid",
                           {"articles": CSVColumn.NONE, "books": CSVColumn.NONE},
                           None, overwrite=OVERWRITE_STRATEGY["pmcid"]),
        "ut": CSVColumn("ut",
                        {"articles": CSVColumn.NONE, "books": CSVColumn.NONE},
                        None, overwrite=OVERWRITE_STRATEGY["ut"]),
        "url": CSVColumn("url",
                         {"articles": CSVColumn.BACKUP, "books": CSVColumn.NONE},
                         args.url_column, overwrite=OVERWRITE_STRATEGY["url"]),
        "doaj": CSVColumn("doaj",
                          {"articles": CSVColumn.NONE, "books": CSVColumn.NONE},
                          None, overwrite=OVERWRITE_STRATEGY["doaj"]),
        "agreement": CSVColumn("agreement",
                               {"articles": CSVColumn.NONE, "books": CSVColumn.NONE},
                               None, overwrite=OVERWRITE_STRATEGY["agreement"]),
        "book_title": CSVColumn("book_title",
                                {"articles": CSVColumn.NONE, "books": CSVColumn.RECOMMENDED},
                                args.book_title_column,
                                overwrite=OVERWRITE_STRATEGY["book_title"]),
        "backlist_oa": CSVColumn("backlist_oa",
                                 {"articles": CSVColumn.NONE, "books": CSVColumn.MANDATORY},
                                 args.backlist_oa_column,
                                 overwrite=OVERWRITE_STRATEGY["backlist_oa"]),
        "isbn": CSVColumn("isbn",
                          {"articles": CSVColumn.NONE, "books": CSVColumn.BACKUP},
                          args.isbn_column, overwrite=OVERWRITE_STRATEGY["isbn"]),
        "isbn_print": CSVColumn("isbn_print",
                                {"articles": CSVColumn.NONE, "books": CSVColumn.NONE},
                                None, overwrite=OVERWRITE_STRATEGY["isbn_print"]),
        "isbn_electronic": CSVColumn("isbn_electronic",
                                     {"articles": CSVColumn.NONE, "books": CSVColumn.NONE},
                                     None, overwrite=OVERWRITE_STRATEGY["isbn_electronic"]),
    }

    header = None
    if has_header:
        for row in reader:
            if not row:  # Skip empty lines
                continue
            header = row  # First non-empty row should be the header
            if args.ignore_header:
                print("Skipping header analysis due to command line argument.")
                break
            else:
                print("\n *** Analyzing CSV header ***\n")
            for (index, item) in enumerate(header):
                if index in additional_isbn_columns:
                    msg = "Column named '{}' at index {} is designated as additional ISBN column"
                    print(msg.format(item, index))
                    continue
                column_type = oat.get_column_type_from_whitelist(item)
                if column_type is not None and column_map[column_type].index is None:
                    column_map[column_type].index = index
                    column_map[column_type].column_name = item
                    found_msg = ("Found column named '{}' at index {}, " +
                                 "assuming this to be the '{}' column.")
                    print(found_msg.format(item, index, column_type))
            break

    print("\n *** Starting heuristical analysis ***\n")
    for row in reader:
        if not row:  # Skip empty lines
            # We analyze the first non-empty line, a possible header should
            # have been processed by now.
            continue
        column_candidates = {
            "doi": [],
            "period": [],
            "euro": []
        }
        found_msg = "The entry in column {} looks like a potential {}: {}"
        for (index, entry) in enumerate(row):
            if index in [csvcolumn.index for csvcolumn in column_map.values()] + additional_isbn_columns:
                # Skip columns already assigned
                continue
            entry = entry.strip()
            # Search for a DOI
            if column_map['doi'].index is None:
                if oat.DOI_RE.match(entry):
                    column_id = str(index)
                    # identify column either numerically or by column header
                    if header:
                        column_id += " ('" + header[index] + "')"
                    print(found_msg.format(column_id, "DOI", entry))
                    column_candidates['doi'].append(index)
                    continue
            # Search for a potential year string
            if column_map['period'].index is None:
                try:
                    maybe_period = int(entry)
                    now = datetime.date.today().year
                    # Should be a wide enough margin
                    if maybe_period >= 2000 and maybe_period <= now + 2:
                        column_id = str(index)
                        if header:
                            column_id += " ('" + header[index] + "')"
                        print(found_msg.format(column_id, "year", entry))
                        column_candidates['period'].append(index)
                        continue
                except ValueError:
                    pass
            # Search for a potential monetary amount
            if column_map['euro'].index is None:
                try:
                    maybe_euro = locale.atof(entry)
                    if maybe_euro >= 10 and maybe_euro <= 10000:
                        column_id = str(index)
                        if header:
                            column_id += " ('" + header[index] + "')"
                        print(found_msg.format(column_id, "euro amount", entry))
                        column_candidates['euro'].append(index)
                        continue
                except ValueError:
                    pass
        for column_type, candidates in column_candidates.items():
            if column_map[column_type].index is not None:
                continue
            if len(candidates) > 1:
                print("Could not reliably identify the '" + column_type +
                      "' column - more than one possible candidate!")
            elif len(candidates) < 1:
                print("No candidate found for column '" + column_type + "'!")
            else:
                index = candidates.pop()
                column_map[column_type].index = index
                if header:
                    column_id = header[index]
                    column_map[column_type].column_name = column_id
                else:
                    column_id = index
                msg = "Assuming column '{}' to be the '{}' column."
                print(msg.format(column_id, column_type))
        break

    print("\n *** CSV file analysis summary ***\n")
    index_dict = {csvc.index: csvc for csvc in column_map.values()}
    for index in range(num_columns):
        column_name = ""
        if header:
            column_name = header[index]
        if index in index_dict:
            column = index_dict[index]
            msg = u"column number {} ({}) is the '{}' column ({})".format(
                index, column_name, column.column_type,
                column.get_req_description())
            print(msg)
        elif index in additional_isbn_columns:
            msg = u"column number {} ({}) is an additional ISBN column".format(
                index, column_name)
            oat.print_c(msg)
        else:
            if args.add_unknown_columns:
                msg = (u"column number {} ({}) is an unknown column, it will be " +
                       "appended to the generated CSV file")
                print(msg.format(index, column_name))
                if not column_name:
                    # Use a generic name
                    column_name = "unknown"
                while column_name in column_map.keys():
                    # TODO: Replace by a numerical, increasing suffix
                    column_name += "_"
                column_map[column_name] = CSVColumn(column_name, CSVColumn.NONE, index)
            else:
                msg = (u"column number {} ({}) is an unknown column, it will be " +
                       "ignored")
                print(msg.format(index, column_name))

    print()
    for column in column_map.values():
        if column.index is None:
            msg = "The '{}' column could not be identified ({})"
            print(msg.format(column.column_type, column.get_req_description()))
    print()

    article_mand_missing = [x.column_type for x in column_map.values()
                            if x.requirement["articles"] == CSVColumn.MANDATORY and x.index is None]
    article_back_missing = [x.column_type for x in column_map.values()
                            if x.requirement["articles"] == CSVColumn.BACKUP and x.index is None]
    book_mand_missing = [x.column_type for x in column_map.values()
                         if x.requirement["books"] == CSVColumn.MANDATORY and x.index is None]
    book_back_missing = [x.column_type for x in column_map.values()
                         if x.requirement["books"] == CSVColumn.BACKUP and x.index is None]

    if article_mand_missing:
        msg = "Article enrichment is not possible - mandatory columns are missing ({})"
        oat.print_y(msg.format(", ".join(article_mand_missing)))
    elif article_back_missing:
        msg = "Article enrichment is possible, but backup columns are missing ({}) - each record will need a valid DOI"
        oat.print_b(msg.format(", ".join(article_back_missing)))
    else:
        oat.print_g("Article enrichment is possible with all backup columns in place")
    if book_mand_missing:
        msg = "Book enrichment is not possible - mandatory columns are missing ({})"
        oat.print_y(msg.format(", ".join(book_mand_missing)))
    elif book_back_missing:
        msg = "Book enrichment is possible, but backup columns are missing ({}) - each record will need a valid DOI"
        oat.print_b(msg.format(", ".join(book_back_missing)))
    else:
        oat.print_g("Book enrichment is possible with all backup columns in place")
    print()

    if article_mand_missing and book_mand_missing:
        if not args.force:
            oat.print_r("ERROR: Could not detect the minimum mandatory data set for any " +
                        "publication type. There are 2 ways to fix this:")
            if not header:
                print("1) Add a header row to your file and identify the " +
                      "column(s) by assigning them an appropriate column name.")
            else:
                print("1) Identify the missing column(s) by assigning them " +
                      "a different column name in the CSV header (You can " +
                      "use the column name(s) mentioned in the message above)")
            print("2) Use command line parameters when calling this script " +
                  "to identify the missing columns (use -h for help) ")
            sys.exit()
        else:
            oat.print_y("WARNING: Could not detect the minimum mandatory data set for any " +
                        "publication type - forced to continue.")

    start = input("\nStart metadata aggregation? (y/n):")
    while start not in ["y", "n"]:
        start = input("Please type 'y' or 'n':")
    if start == "n":
        sys.exit()

    print("\n *** Starting metadata aggregation ***\n")

    enriched_content = {}
    for record_type, fields in oat.COLUMN_SCHEMAS.items():
        # add headers
        enriched_content[record_type] = {
            "count": 0,
            "content": [list(fields)]
        }

    if not os.path.isdir("tempfiles"):
        os.mkdir("tempfiles")
    isbn_handling = oat.ISBNHandling("tempfiles/ISBNRangeFile.xml")
    doab_analysis = oat.DOABAnalysis(isbn_handling, "tempfiles/DOAB.csv", verbose=False)
    doaj_analysis = oat.DOAJAnalysis("tempfiles/DOAJ.csv")

    csv_file.seek(0)
    reader = csv.reader(csv_file, dialect=dialect)
    header_processed = False
    row_num = 0

    for row in reader:
        row_num += 1
        if not row:
            continue  # skip empty lines
        if not header_processed:
            header_processed = True
            if has_header:
                # If the CSV file has a header, we are currently there - skip it
                # to get to the first data row
                continue
        if args.start and args.start > row_num:
            continue
        if args.end and args.end < row_num:
            continue
        print("---Processing line number " + str(row_num) + "---")
        result_type, enriched_row = oat.process_row(row, row_num, column_map,
                                                    num_columns,
                                                    additional_isbn_columns,
                                                    doab_analysis,
                                                    doaj_analysis,
                                                    args.no_crossref,
                                                    args.no_pubmed,
                                                    args.no_doaj,
                                                    args.round_monetary,
                                                    args.offsetting_mode)
        # Keep all record-type tables row-aligned: the matching table gets
        # the enriched row, every other table gets an empty placeholder.
        for record_type, value in enriched_content.items():
            if record_type == result_type:
                value["content"].append(enriched_row)
                value["count"] += 1
            else:
                empty_line = ["" for x in value["content"][0]]
                value["content"].append(empty_line)

    csv_file.close()

    for record_type, value in enriched_content.items():
        if value["count"] > 0:
            with open('out_' + record_type + '.csv', 'w') as out:
                writer = oat.OpenAPCUnicodeWriter(out, oat.OPENAPC_STANDARD_QUOTEMASK,
                                                  True, True, True)
                writer.write_rows(value["content"])

    if not bufferedHandler.buffer:
        oat.print_g("Metadata enrichment successful, no errors occurred")
    else:
        oat.print_r("There were errors during the enrichment process:\n")
    # closing will implicitly flush the handler and print any buffered
    # messages to stderr
    bufferedHandler.close()
def insertInAppPurchaseProducts(self, applicationSKU, metadata, products):
    """Insert in-app-purchase product entries into an iTunes metadata XML blob.

    Parses *metadata* (an iTunes Store Transporter XML string), ensures an
    <in_app_purchases> node exists under software/software_metadata, appends
    one <in_app_purchase> per entry of *products*, and returns the whole
    document re-serialized as a UTF-8 XML string.

    Only products that are instances of self.RegionAsProduct are filled in
    with data; other entries get an empty <in_app_purchase> element.
    Products with priceTier == 0 are skipped entirely.
    """
    xmlMetadataRoot = ET.fromstring(metadata)
    xmlProducts = xmlMetadataRoot.find(
        "./{http://apple.com/itunes/importer}software" +
        "/{http://apple.com/itunes/importer}software_metadata" +
        "/{http://apple.com/itunes/importer}in_app_purchases");
    # NOTE(review): "== None" should be "is None"; kept as-is here.
    if xmlProducts == None:
        # No purchases container yet -- create one under software_metadata.
        xmlProducts = ET.Element("{http://apple.com/itunes/importer}in_app_purchases")
        xmlMetadataRoot.find(
            "./{http://apple.com/itunes/importer}software" +
            "/{http://apple.com/itunes/importer}software_metadata").append(xmlProducts)
    productCounter = 0
    for product in products:
        xmlProduct = ET.Element("{http://apple.com/itunes/importer}in_app_purchase")
        if isinstance(product, self.RegionAsProduct):
            if product.priceTier == 0:
                # Free regions are not sold as in-app purchases.
                continue
            # Product id: SKU with ".app" -> ".region." plus a sanitized region id.
            xmlProductId = ET.Element("{http://apple.com/itunes/importer}product_id")
            xmlProductId.text = applicationSKU.replace(".app", ".region.") + product.regionId.replace("_", ".").replace("-", "_")
            xmlProduct.append(xmlProductId)
            xmlReferenceName = ET.Element("{http://apple.com/itunes/importer}reference_name")
            xmlReferenceName.text = "region:" + product.regionId
            xmlProduct.append(xmlReferenceName)
            xmlType = ET.Element("{http://apple.com/itunes/importer}type")
            xmlType.text = "non-consumable"
            xmlProduct.append(xmlType)
            # Pricing: a single interval starting today at the product's tier.
            xmlPrices = ET.Element("{http://apple.com/itunes/importer}products")
            xmlPrice = ET.Element("{http://apple.com/itunes/importer}product")
            xmlPriceEnable = ET.Element("{http://apple.com/itunes/importer}cleared_for_sale")
            xmlPriceEnable.text = "true"
            xmlPrice.append(xmlPriceEnable)
            xmlPriceIntervals = ET.Element("{http://apple.com/itunes/importer}intervals")
            xmlPriceInterval = ET.Element("{http://apple.com/itunes/importer}interval")
            xmlPriceIntervalStartDate = ET.Element("{http://apple.com/itunes/importer}start_date")
            xmlPriceIntervalStartDate.text = time.strftime("%Y-%m-%d")
            xmlPriceInterval.append(xmlPriceIntervalStartDate)
            xmlPriceIntervalTier = ET.Element("{http://apple.com/itunes/importer}wholesale_price_tier")
            # self.priceTiers presumably maps tier indexes to store tier ids -- confirm.
            xmlPriceIntervalTier.text = str(self.priceTiers[product.priceTier])
            xmlPriceInterval.append(xmlPriceIntervalTier)
            xmlPriceIntervals.append(xmlPriceInterval)
            xmlPrice.append(xmlPriceIntervals)
            xmlPrices.append(xmlPrice)
            xmlProduct.append(xmlPrices)
            xmlReviewNotes = ET.Element("{http://apple.com/itunes/importer}review_notes")
            xmlReviewNotes.text = (
                "This product identifies detailed map of specified region. "
                "Content of map itself is downloaded from OsmAnd internal servers. "
                "There's no speicific screenshot of the map, so an screenshot of region overview map is attached.")
            xmlProduct.append(xmlReviewNotes)
            # Complete fake for now
            screenshotFilename = os.path.abspath(tempfile.gettempdir() + "/" + applicationSKU + ".itmsp/" + product.regionId + ".png")
            shutil.copy("empty.png", screenshotFilename)
            xmlReviewScreenshot = ET.Element("{http://apple.com/itunes/importer}review_screenshot")
            xmlReviewScreenshotSize = ET.Element("{http://apple.com/itunes/importer}size")
            xmlReviewScreenshotSize.text = str(os.path.getsize(screenshotFilename))
            xmlReviewScreenshot.append(xmlReviewScreenshotSize)
            xmlReviewScreenshotFilename = ET.Element("{http://apple.com/itunes/importer}file_name")
            xmlReviewScreenshotFilename.text = product.regionId + ".png"
            xmlReviewScreenshot.append(xmlReviewScreenshotFilename)
            xmlReviewScreenshotCrc = ET.Element("{http://apple.com/itunes/importer}checksum", type="md5")
            # NOTE(review): the file handle from open() is never closed here.
            xmlReviewScreenshotCrc.text = hashlib.md5(open(screenshotFilename, "rb").read()).hexdigest()
            xmlReviewScreenshot.append(xmlReviewScreenshotCrc)
            xmlProduct.append(xmlReviewScreenshot)
            langEnSet = False
            getLangOnlyRegex = re.compile(r"^[^.]*")
            xmlProductLocales = ET.Element("{http://apple.com/itunes/importer}locales")
            for lang, name in product.localizedNames.items():
                xmlProductLocale = ET.Element("{http://apple.com/itunes/importer}locale")
                # Seems that iTMSTransporter has issue counting bytes instead of characters
                if len(name.encode("utf-8")) > 75:
                    continue
                # Normalize e.g. "en" -> "en_US.ISO8859-1", keep the part before
                # the codeset and switch underscores to dashes ("en-US").
                properLang = re.search(getLangOnlyRegex, locale.normalize(lang)).group(0).replace("_", "-")
                if properLang not in self.allowedLanguages:
                    continue
                if lang == "en":
                    langEnSet = True
                xmlProductLocale.set("name", properLang)
                xmlTitle = ET.Element("{http://apple.com/itunes/importer}title")
                xmlTitle.text = name[:(75-3)] + "..." if len(name) > 75 else name
                xmlProductLocale.append(xmlTitle)
                xmlDescription = ET.Element("{http://apple.com/itunes/importer}description")
                xmlDescription.text = name + " (OsmAnd)"
                xmlProductLocale.append(xmlDescription)
                xmlProductLocales.append(xmlProductLocale)
            if not langEnSet:
                # No usable English localization found: synthesize an en-US one
                # from the product's plain name (or its region id).
                xmlProductLocale = ET.Element("{http://apple.com/itunes/importer}locale")
                xmlProductLocale.set("name", "en-US")
                properName = product.name
                if not properName:
                    properName = product.regionId
                # Seems that iTMSTransporter has issue counting bytes instead of characters
                # NOTE(review): this "continue" drops the whole product (and its
                # locales built so far) -- confirm that is intended.
                if len(properName.encode("utf-8")) > 75:
                    continue
                xmlTitle = ET.Element("{http://apple.com/itunes/importer}title")
                xmlTitle.text = properName[:(75-3)] + "..." if len(properName) > 75 else properName
                xmlProductLocale.append(xmlTitle)
                xmlDescription = ET.Element("{http://apple.com/itunes/importer}description")
                xmlDescription.text = properName + " (OsmAnd)"
                xmlProductLocale.append(xmlDescription)
                xmlProductLocales.append(xmlProductLocale)
            xmlProduct.append(xmlProductLocales)
        xmlProducts.append(ET.Comment("Product #" + str(productCounter)))
        xmlProducts.append(xmlProduct)
        productCounter += 1
    return '<?xml version="1.0" encoding="UTF-8"?>\n' + ET.tostring(xmlMetadataRoot, encoding="utf-8").decode("utf-8")
# Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # On Unix systems, a value of None will cause Django to use the same # timezone as the operating system. # If running in a Windows environment this must be set to the same as your # system time zone. TIME_ZONE = 'UTC' # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html #LANGUAGE_CODE = 'en-us' LANGUAGE_CODE = 'is_IS.UTF8' import locale locale.setlocale(locale.LC_ALL,locale.normalize(LANGUAGE_CODE)) SITE_ID = 1 # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True # If you set this to False, Django will not format dates, numbers and # calendars according to the current locale USE_L10N = True # Absolute path to the directory that holds media. # Example: "/home/media/media.lawrence.com/" MEDIA_ROOT = '/srv/petit/media/'
def setup_gettext(localedir, ui_language=None, logger=None):
    """Setup locales, load translations, install gettext functions.

    Determines the locale to use (explicit *ui_language*, or a
    platform-specific default on Windows/macOS/other), exports it via the
    LANG/LANGUAGE environment variables and QLocale, then installs the
    gettext translation functions (_, ngettext, gettext_countries,
    gettext_attributes) into builtins, falling back to identity functions
    when no translation catalogs are found.
    """
    if not logger:
        # Default to a no-op logger so the calls below never fail.
        logger = lambda *a, **b: None  # noqa: E731
    current_locale = ''
    if ui_language:
        try:
            # Normalize e.g. "de" to a full locale with the preferred codeset.
            current_locale = locale.normalize(ui_language + '.' + locale.getpreferredencoding())
            locale.setlocale(locale.LC_ALL, current_locale)
        except Exception as e:
            logger(e)
    else:
        if IS_WIN:
            # Derive the locale from the Windows UI language id.
            from ctypes import windll
            try:
                current_locale = locale.windows_locale[
                    windll.kernel32.GetUserDefaultUILanguage()]
                current_locale += '.' + locale.getpreferredencoding()
                locale.setlocale(locale.LC_ALL, current_locale)
            except KeyError:
                # Unknown language id: fall back to the default C locale lookup.
                try:
                    current_locale = locale.setlocale(locale.LC_ALL, '')
                except Exception as e:
                    logger(e)
            except Exception as e:
                logger(e)
        elif IS_MACOS:
            # Use the first entry of the user's AppleLanguages preference.
            try:
                import Foundation
                defaults = Foundation.NSUserDefaults.standardUserDefaults()
                current_locale = defaults.objectForKey_('AppleLanguages')[0]
                current_locale = current_locale.replace('-', '_')
                locale.setlocale(locale.LC_ALL, current_locale)
            except Exception as e:
                logger(e)
        else:
            # POSIX: honor the environment's locale settings.
            try:
                locale.setlocale(locale.LC_ALL, '')
                current_locale = '.'.join(locale.getlocale(locale.LC_MESSAGES))
            except Exception as e:
                logger(e)
    os.environ['LANGUAGE'] = os.environ['LANG'] = current_locale
    QLocale.setDefault(QLocale(current_locale))
    logger("Using locale %r", current_locale)
    try:
        logger("Loading gettext translation, localedir=%r", localedir)
        trans = gettext.translation("picard", localedir)
        trans.install(True)
        _ngettext = trans.ngettext
        logger("Loading gettext translation (picard-countries), localedir=%r", localedir)
        trans_countries = gettext.translation("picard-countries", localedir)
        _gettext_countries = trans_countries.gettext
        logger("Loading gettext translation (picard-attributes), localedir=%r", localedir)
        trans_attributes = gettext.translation("picard-attributes", localedir)
        _gettext_attributes = trans_attributes.gettext
    except IOError as e:
        # No catalogs found: install identity fallbacks so the gettext
        # helpers are always defined.
        logger(e)
        builtins.__dict__['_'] = lambda a: a

        def _ngettext(a, b, c):
            if c == 1:
                return a
            else:
                return b

        def _gettext_countries(msg):
            return msg

        def _gettext_attributes(msg):
            return msg

    builtins.__dict__['ngettext'] = _ngettext
    builtins.__dict__['gettext_countries'] = _gettext_countries
    builtins.__dict__['gettext_attributes'] = _gettext_attributes
    logger("_ = %r", _)
    logger("N_ = %r", N_)
    logger("ngettext = %r", ngettext)
    logger("gettext_countries = %r", gettext_countries)
    logger("gettext_attributes = %r", gettext_attributes)
class srDateTime(datetime):
    """datetime subclass rendering values with the date/time formats configured in SickRage.

    All formatting methods are decorated with ``@static_or_instance`` so they can
    be called either on an instance (``x.srftime()``) or statically with an
    explicit datetime (``srDateTime.srftime(dt=x)``); in the static case ``self``
    is None and ``dt`` supplies the value to format.
    """

    # Flipped to False permanently once switching LC_TIME to en_US fails,
    # so later calls skip the locale juggling instead of retrying forever.
    has_locale = True
    # Normalized spelling of the en_US locale, used as a fallback name.
    en_US_norm = locale.normalize('en_US.utf-8')

    @static_or_instance
    def convert_to_setting(self, dt=None):
        """Convert to the display timezone when TIMEZONE_DISPLAY == 'local'; pass through otherwise."""
        target = dt if self is None else self
        try:
            if sickrage.srCore.srConfig.TIMEZONE_DISPLAY == 'local':
                return target.astimezone(sr_timezone)
            return target
        except Exception:
            # Best effort: on any conversion failure return the value unchanged.
            return target

    # display Time in SickRage Format
    @static_or_instance
    def srftime(self, dt=None, show_seconds=False, t_preset=None):
        """
        Display time in SR format

        :param dt: datetime object (used when called statically)
        :param show_seconds: Boolean, show seconds
        :param t_preset: Preset time format
        :return: time string
        """
        try:
            locale.setlocale(locale.LC_TIME, '')
        except Exception:
            pass

        # Temporarily switch LC_TIME to en_US so the configured presets
        # render consistently (AM/PM markers etc.).
        try:
            if srDateTime.has_locale:
                locale.setlocale(locale.LC_TIME, 'en_US')
        except Exception:
            try:
                if srDateTime.has_locale:
                    locale.setlocale(locale.LC_TIME, srDateTime.en_US_norm)
            except Exception:
                srDateTime.has_locale = False

        strt = ''
        try:
            target = dt if self is None else self
            if target is not None:
                if t_preset is not None:
                    strt = target.strftime(t_preset)
                elif show_seconds:
                    strt = target.strftime(sickrage.srCore.srConfig.TIME_PRESET_W_SECONDS)
                else:
                    strt = target.strftime(sickrage.srCore.srConfig.TIME_PRESET)
        finally:
            # Always restore the user's default LC_TIME.
            try:
                if srDateTime.has_locale:
                    locale.setlocale(locale.LC_TIME, '')
            except Exception:
                srDateTime.has_locale = False
        return strt

    # display Date in SickRage Format
    @static_or_instance
    def srfdate(self, dt=None, d_preset=None):
        """
        Display date in SR format

        :param dt: datetime object (used when called statically)
        :param d_preset: Preset date format
        :return: date string
        """
        try:
            locale.setlocale(locale.LC_TIME, '')
        except Exception:
            pass

        strd = ''
        try:
            target = dt if self is None else self
            if target is not None:
                if d_preset is not None:
                    strd = target.strftime(d_preset)
                else:
                    strd = target.strftime(sickrage.srCore.srConfig.DATE_PRESET)
        finally:
            try:
                locale.setlocale(locale.LC_TIME, '')
            except Exception:
                pass
        return strd

    # display Datetime in SickRage Format
    @static_or_instance
    def srfdatetime(self, dt=None, show_seconds=False, d_preset=None, t_preset=None):
        """
        Show datetime in SR format

        :param dt: datetime object (used when called statically)
        :param show_seconds: Boolean, show seconds as well
        :param d_preset: Preset date format
        :param t_preset: Preset time format
        :return: datetime string
        """
        try:
            locale.setlocale(locale.LC_TIME, '')
        except Exception:
            pass

        strd = ''
        try:
            # BUG FIX: the original instance branch (self is not None)
            # formatted the time part with ``dt`` — which is None on that
            # path — instead of ``self`` (the sibling sbdatetime class uses
            # self.strftime there). Using a single ``target`` fixes this and
            # removes the duplicated branch bodies.
            target = dt if self is None else self
            if target is not None:
                if d_preset is not None:
                    strd = target.strftime(d_preset)
                else:
                    strd = target.strftime(sickrage.srCore.srConfig.DATE_PRESET)
                # Switch to en_US for the time part (see srftime).
                try:
                    if srDateTime.has_locale:
                        locale.setlocale(locale.LC_TIME, 'en_US')
                except Exception:
                    try:
                        if srDateTime.has_locale:
                            locale.setlocale(locale.LC_TIME, srDateTime.en_US_norm)
                    except Exception:
                        srDateTime.has_locale = False
                if t_preset is not None:
                    strd += ', {}'.format(target.strftime(t_preset))
                elif show_seconds:
                    strd += ', {}'.format(target.strftime(sickrage.srCore.srConfig.TIME_PRESET_W_SECONDS))
                else:
                    strd += ', {}'.format(target.strftime(sickrage.srCore.srConfig.TIME_PRESET))
        finally:
            try:
                if srDateTime.has_locale:
                    locale.setlocale(locale.LC_TIME, '')
            except Exception:
                srDateTime.has_locale = False
        return strd
def check(self, localename, expected): self.assertEqual(locale.normalize(localename), expected, msg=localename)
def set_state(request):
    """Change a reservation's state and send the matching Dutch notification mail.

    Moderators may set any state; a hirer's own group members may only request
    cancellation of a cancellable reservation. Redirects back to the month view.
    """
    # Dutch month names in the strftime output used by the mails below.
    setlocale(LC_TIME, normalize('nl_NL'))
    id = int(request.REQUEST['id'])
    newstate = request.REQUEST['state']
    r = Reservation.objects.get(id=id)
    oldstate = r.state
    # Authorization: moderators can do anything; group members may only
    # file a cancel request for their own cancellable reservations.
    if request.user.vvsuser.can_moderate:
        pass
    elif r.hirer in request.user.vvsuser.cached_vvsgroups and oldstate in cancellable_states and newstate == "cancel_request":
        pass
    else:
        raise PermissionDenied
    r.state = newstate
    r.save()
    if oldstate == newstate:
        messages.info(request, "Status niet gewijzigd")
    elif oldstate == "pending" and newstate == "approved":
        # Approval: notify the hirer (if an address is configured).
        if r.hirer.notifications_to:
            subj = "[Zaalrooster] Reservering %s %s goedgekeurd" % (
                r.room, r.date.strftime("%d %b"))
            msg = "De reservering voor de %s op %s (%s) is goedgekeurd. (%s)" % (
                r.room, r.date.strftime("%d %B %Y"), r.timeframe, r.name)
            em = EmailMessage(subj, msg, to=[r.hirer.notifications_to],
                              cc=settings.MODERATORS)
            em.send()
            messages.success(request, "Mail over goedkeuring verzonden.")
        else:
            messages.info(
                request,
                "Mail over goedkeuring niet verzonden. Er is geen e-mailadres ingesteld voor %s." % r.hirer)
    elif oldstate == "pending" and newstate == "confirmed":
        # Contract signed: notify the hirer.
        if r.hirer.notifications_to:
            subj = "[Zaalrooster] Zaalhuur %s %s bevestigd" % (
                r.room, r.date.strftime("%d %b"))
            msg = "De zaalhuur voor de %s op %s (%s) is getekend. (%s)" % (
                r.room, r.date.strftime("%d %B %Y"), r.timeframe, r.name)
            em = EmailMessage(subj, msg, to=[r.hirer.notifications_to],
                              cc=settings.MODERATORS)
            em.send()
            messages.success(request, "Mail over tekenen verzonden.")
        else:
            messages.info(
                request,
                "Mail over tekenen niet verzonden. Er is geen e-mailadres ingesteld voor %s." % r.hirer)
    elif oldstate in ("pending", "approved") and newstate == "needsigning":
        # Contract must still be signed: ask the hirer to sign.
        if r.hirer.notifications_to:
            subj = "[Zaalrooster] Zaalhuur %s %s te tekenen" % (
                r.room, r.date.strftime("%d %b"))
            msg = "Het contract voor de zaalhuur van de %s op %s (%s) moet getekend worden. (%s)" % (
                r.room, r.date.strftime("%d %B %Y"), r.timeframe, r.name)
            em = EmailMessage(subj, msg, to=[r.hirer.notifications_to],
                              cc=settings.MODERATORS)
            em.send()
            messages.success(request, "Mail over tekenverzoek verzonden.")
        else:
            messages.info(
                request,
                "Mail over tekenverzoek niet verzonden. Er is geen e-mailadres ingesteld voor %s." % r.hirer)
    elif newstate == "cancelled":
        # Cancellation confirmed: notify the hirer.
        if r.hirer.notifications_to:
            subj = "[Zaalrooster] Reservering %s op %s geannuleerd" % (
                r.room, r.date.strftime("%d %b"))
            msg = "De zaalhuur van de %s op %s (%s) is geannuleerd. (%s)" % (
                r.room, r.date.strftime("%d %B %Y"), r.timeframe, r.name)
            em = EmailMessage(subj, msg, to=[r.hirer.notifications_to],
                              cc=settings.MODERATORS)
            em.send()
            messages.success(request, "Reservering geannuleerd.")
        else:
            messages.info(
                request,
                "Mail over annulering niet verzonden. Er is geen e-mailadres ingesteld voor %s." % r.hirer)
    elif not request.user.vvsuser.can_moderate and newstate == "cancel_request":
        # Non-moderator requesting cancellation: mail the moderators and
        # return to the month overview immediately.
        subj = "[Zaalrooster] Verzoek reservering %s op %s te annuleren" % (
            r.room, r.date.strftime("%d %b"))
        msg = "%s wil de zaalhuur van de %s op %s (%s) annuleren. (%s)" % (
            r.hirer, r.room, r.date.strftime("%d %B %Y"), r.timeframe, r.name)
        em = EmailMessage(subj, msg, to=settings.MODERATORS,
                          cc=[request.user.email])
        em.send()
        messages.success(request, "Annulering aangevraagd.")
        return HttpResponseRedirect(
            reverse('month-view', kwargs=split_date(r.date, include_day=False)))
    else:
        messages.info(
            request,
            "Status gewijzigd. Er is geen automatische e-mail verzonden!")
    return HttpResponseRedirect(r.date.strftime("/%Y/%m/%d/") + r.state + "/")
def main():
    """CLI entry point (Python 2): analyze an OpenAPC CSV file, identify its
    columns (by header names, heuristics, or command-line overrides), then
    interactively run metadata aggregation and write the enriched result to
    ``out.csv``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("csv_file", help=ARG_HELP_STRINGS["csv_file"])
    parser.add_argument("-b", "--bypass-cert-verification", action="store_true",
                        help=ARG_HELP_STRINGS["bypass"])
    parser.add_argument("-d", "--offline_doaj",
                        help=ARG_HELP_STRINGS["offline_doaj"])
    parser.add_argument("-e", "--encoding", help=ARG_HELP_STRINGS["encoding"])
    parser.add_argument("-f", "--force", action="store_true",
                        help=ARG_HELP_STRINGS["force"])
    parser.add_argument("-i", "--ignore-header", action="store_true",
                        help=ARG_HELP_STRINGS["ignore_header"])
    parser.add_argument("-j", "--force-header", action="store_true",
                        help=ARG_HELP_STRINGS["force_header"])
    parser.add_argument("-l", "--locale", help=ARG_HELP_STRINGS["locale"])
    parser.add_argument("-u", "--add-unknown-columns", action="store_true",
                        help=ARG_HELP_STRINGS["unknown_columns"])
    parser.add_argument("-v", "--verbose", action="store_true",
                        help=ARG_HELP_STRINGS["verbose"])
    # Explicit column-index overrides; each skips the corresponding
    # header/heuristic detection below.
    parser.add_argument("-institution", "--institution_column", type=int,
                        help=ARG_HELP_STRINGS["institution"])
    parser.add_argument("-period", "--period_column", type=int,
                        help=ARG_HELP_STRINGS["period"])
    parser.add_argument("-doi", "--doi_column", type=int,
                        help=ARG_HELP_STRINGS["doi"])
    parser.add_argument("-euro", "--euro_column", type=int,
                        help=ARG_HELP_STRINGS["euro"])
    parser.add_argument("-is_hybrid", "--is_hybrid_column", type=int,
                        help=ARG_HELP_STRINGS["is_hybrid"])
    parser.add_argument("-publisher", "--publisher_column", type=int,
                        help=ARG_HELP_STRINGS["publisher"])
    parser.add_argument("-journal_full_title", "--journal_full_title_column",
                        type=int, help=ARG_HELP_STRINGS["journal_full_title"])
    parser.add_argument("-issn", "--issn_column", type=int,
                        help=ARG_HELP_STRINGS["issn"])
    parser.add_argument("-url", "--url_column", type=int,
                        help=ARG_HELP_STRINGS["url"])
    parser.add_argument("-start", type=int, help=ARG_HELP_STRINGS["start"])
    parser.add_argument("-end", type=int, help=ARG_HELP_STRINGS["end"])
    args = parser.parse_args()
    enc = None  # CSV file encoding

    # Errors are printed immediately (handler) and also buffered so they can
    # be replayed at the very end of the run (bufferedHandler).
    handler = logging.StreamHandler(sys.stderr)
    handler.setFormatter(ANSIColorFormatter())
    bufferedHandler = BufferedErrorHandler(handler)
    bufferedHandler.setFormatter(ANSIColorFormatter())
    logging.root.addHandler(handler)
    logging.root.addHandler(bufferedHandler)
    logging.root.setLevel(logging.INFO)

    if args.locale:
        # The locale affects number parsing (locale.atof) in the euro
        # heuristic further down.
        norm = locale.normalize(args.locale)
        if norm != args.locale:
            print "locale '{}' not found, normalized to '{}'".format(
                args.locale, norm)
        try:
            loc = locale.setlocale(locale.LC_ALL, norm)
            print "Using locale", loc
        except locale.Error as loce:
            print "Setting locale to " + norm + " failed: " + loce.message
            sys.exit()

    if args.encoding:
        try:
            codec = codecs.lookup(args.encoding)
            print ("Encoding '{}' found in Python's codec collection " +
                   "as '{}'").format(args.encoding, codec.name)
            enc = args.encoding
        except LookupError:
            print ("Error: '" + args.encoding + "' not found Python's " +
                   "codec collection. Either look for a valid name here " +
                   "(https://docs.python.org/2/library/codecs.html#standard-" +
                   "encodings) or omit this argument to enable automated " +
                   "guessing.")
            sys.exit()

    # Sniff dialect/encoding/header from the file itself.
    result = oat.analyze_csv_file(args.csv_file)
    if result["success"]:
        csv_analysis = result["data"]
        print csv_analysis
    else:
        print result["error_msg"]
        sys.exit()

    if enc is None:
        enc = csv_analysis.enc
    dialect = csv_analysis.dialect
    has_header = csv_analysis.has_header or args.force_header

    if enc is None:
        print ("Error: No encoding given for CSV file and automated " +
               "detection failed. Please set the encoding manually via the " +
               "--enc argument")
        sys.exit()

    doaj_offline_analysis = None
    if args.offline_doaj:
        if os.path.isfile(args.offline_doaj):
            doaj_offline_analysis = oat.DOAJOfflineAnalysis(args.offline_doaj)
        else:
            oat.print_r("Error: " + args.offline_doaj + " does not seem "
                        "to be a file!")

    csv_file = open(args.csv_file, "r")
    reader = oat.UnicodeReader(csv_file, dialect=dialect, encoding=enc)

    # Peek at the first row only to count columns, then rewind.
    first_row = reader.next()
    num_columns = len(first_row)
    print "\nCSV file has {} columns.".format(num_columns)

    csv_file.seek(0)
    reader = oat.UnicodeReader(csv_file, dialect=dialect, encoding=enc)

    # Canonical output columns; index is None until detected/assigned.
    column_map = OrderedDict([
        ("institution", CSVColumn("institution", CSVColumn.MANDATORY, args.institution_column)),
        ("period", CSVColumn("period", CSVColumn.MANDATORY, args.period_column)),
        ("euro", CSVColumn("euro", CSVColumn.MANDATORY, args.euro_column)),
        ("doi", CSVColumn("doi", CSVColumn.MANDATORY, args.doi_column)),
        ("is_hybrid", CSVColumn("is_hybrid", CSVColumn.MANDATORY, args.is_hybrid_column)),
        ("publisher", CSVColumn("publisher", CSVColumn.OPTIONAL, args.publisher_column)),
        ("journal_full_title", CSVColumn("journal_full_title", CSVColumn.OPTIONAL, args.journal_full_title_column)),
        ("issn", CSVColumn("issn", CSVColumn.OPTIONAL, args.issn_column)),
        ("issn_print", CSVColumn("issn_print", CSVColumn.NONE, None)),
        ("issn_electronic", CSVColumn("issn_electronic", CSVColumn.NONE, None)),
        ("license_ref", CSVColumn("license_ref", CSVColumn.NONE, None)),
        ("indexed_in_crossref", CSVColumn("indexed_in_crossref", CSVColumn.NONE, None)),
        ("pmid", CSVColumn("pmid", CSVColumn.NONE, None)),
        ("pmcid", CSVColumn("pmcid", CSVColumn.NONE, None)),
        ("ut", CSVColumn("ut", CSVColumn.NONE, None)),
        ("url", CSVColumn("url", CSVColumn.OPTIONAL, args.url_column)),
        ("doaj", CSVColumn("doaj", CSVColumn.NONE, None))
    ])

    # Do not quote the values in the 'period' and 'euro' columns
    # (one flag per column_map entry, in the same order).
    quotemask = [
        True,
        False,
        False,
        True,
        True,
        True,
        True,
        True,
        True,
        True,
        True,
        True,
        True,
        True,
        True,
        True,
        True,
    ]

    header = None
    if has_header:
        # Match header cell names against the known-column whitelist.
        for row in reader:
            if not row:  # Skip empty lines
                continue
            header = row  # First non-empty row should be the header
            if args.ignore_header:
                print "Skipping header analysis due to command line argument."
                break
            else:
                print "\n *** Analyzing CSV header ***\n"
            for (index, item) in enumerate(header):
                column_type = oat.get_column_type_from_whitelist(item)
                if column_type is not None and column_map[column_type].index is None:
                    column_map[column_type].index = index
                    column_map[column_type].column_name = item
                    print ("Found column named '{}' at index {}, " +
                           "assuming this to be the {} column.").format(
                               item, index, column_type)
            break

    print "\n *** Starting heuristical analysis ***\n"
    # Inspect the first non-empty data row to guess any still-unassigned
    # doi/period/euro columns from the cell contents.
    for row in reader:
        if not row:  # Skip empty lines
            # We analyze the first non-empty line, a possible header should
            # have been processed by now.
            continue
        column_candidates = {"doi": [], "period": [], "euro": []}
        for (index, entry) in enumerate(row):
            if index in [csvcolumn.index for csvcolumn in column_map.values()]:
                # Skip columns already assigned
                continue
            entry = entry.strip()
            # Search for a DOI
            if column_map['doi'].index is None:
                if oat.DOI_RE.match(entry):
                    column_id = str(index)
                    # identify column either numerical or by column header
                    if header:
                        column_id += " ('" + header[index] + "')"
                    print ("The entry in column {} looks like a " +
                           "DOI: {}").format(column_id, entry)
                    column_candidates['doi'].append(index)
                    continue
            # Search for a potential year string
            if column_map['period'].index is None:
                try:
                    maybe_period = int(entry)
                    now = datetime.date.today().year
                    # Should be a wide enough margin
                    if maybe_period >= 2000 and maybe_period <= now + 2:
                        column_id = str(index)
                        if header:
                            column_id += " ('" + header[index] + "')"
                        print ("The entry in column {} looks like a " +
                               "potential period: {}").format(column_id, entry)
                        column_candidates['period'].append(index)
                        continue
                except ValueError:
                    pass
            # Search for a potential monetary amount
            if column_map['euro'].index is None:
                try:
                    maybe_euro = locale.atof(entry)
                    # Are there APCs above 6000€ ??
                    if maybe_euro >= 10 and maybe_euro <= 6000:
                        column_id = str(index)
                        if header:
                            column_id += " ('" + header[index] + "')"
                        print ("The entry in column {} looks like a " +
                               "potential euro amount: {}").format(
                                   column_id, entry)
                        column_candidates['euro'].append(index)
                        continue
                except ValueError:
                    pass
        # Accept a heuristic guess only when it is unambiguous.
        for column_type, candidates in column_candidates.iteritems():
            if column_map[column_type].index is not None:
                continue
            if len(candidates) > 1:
                print ("Could not reliably identify the '" + column_type +
                       "' column - more than one possible candiate!")
            elif len(candidates) < 1:
                print "No candidate found for column '" + column_type + "'!"
            else:
                index = candidates.pop()
                column_map[column_type].index = index
                if header:
                    column_id = header[index]
                    column_map[column_type].column_name = column_id
                else:
                    column_id = index
                print ("Assuming column '{}' to be the '{}' " +
                       "column.").format(column_id, column_type)
                column_map[column_type].index = index
        break

    # Wrap up: Check if there any mandatory column types left which have not
    # yet been identified - we cannot continue in that case (unless forced).
    unassigned = filter(
        lambda (k, v): v.requirement == CSVColumn.MANDATORY and v.index is None,
        column_map.iteritems())
    if unassigned:
        for item in unassigned:
            print "The {} column is still unidentified.".format(item[0])
        if header:
            print "The CSV header is:\n" + dialect.delimiter.join(header)
        if not args.force:
            print ("ERROR: We cannot continue because not all mandatory " +
                   "column types in the CSV file could be automatically " +
                   "identified. There are 2 ways to fix this:")
            if not header:
                print ("1) Add a header row to your file and identify the " +
                       "column(s) by assigning them an appropiate column name.")
            else:
                print ("1) Identify the missing column(s) by assigning them " +
                       "a different column name in the CSV header (You can " +
                       "use the column name(s) mentioned in the message above)")
            print ("2) Use command line parameters when calling this script " +
                   "to identify the missing columns (use -h for help) ")
            sys.exit()
        else:
            print ("WARNING: Not all mandatory column types in the CSV file " +
                   "could be automatically identified - forced to continue.")

    print "\n *** CSV file analysis summary ***\n"

    # Reverse lookup: file column index -> CSVColumn.
    index_dict = {csvc.index: csvc for csvc in column_map.values()}

    for index in range(num_columns):
        column_name = ""
        if header:
            column_name = header[index]
        if index in index_dict:
            column = index_dict[index]
            msg = "column number {} ({}) is the {} column '{}'".format(
                index, column_name, column.requirement, column.column_type)
            if column.requirement in [CSVColumn.MANDATORY, CSVColumn.OPTIONAL]:
                oat.print_g(msg)
            else:
                oat.print_b(msg)
        else:
            if args.add_unknown_columns:
                msg = ("column number {} ({}) is an unknown column, it will be " +
                       "appended to the generated CSV file")
                oat.print_y(msg.format(index, column_name))
                if not column_name:
                    # Use a generic name
                    column_name = "unknown"
                while column_name in column_map.keys():
                    # TODO: Replace by a numerical, increasing suffix
                    column_name += "_"
                column_map[column_name] = CSVColumn(column_name, CSVColumn.NONE, index)
            else:
                msg = ("column number {} ({}) is an unknown column, it will be " +
                       "ignored")
                oat.print_y(msg.format(index, column_name))

    print ""
    for column in column_map.values():
        if column.index is None:
            msg = "The {} column '{}' could not be identified."
            print msg.format(column.requirement, column.column_type)

    # Check for unassigned optional column types. We can continue but should
    # issue a warning as all entries will need a valid DOI in this case.
    unassigned = filter(
        lambda (k, v): v.requirement == CSVColumn.OPTIONAL and v.index is None,
        column_map.iteritems())
    if unassigned:
        print ("\nWARNING: Not all optional column types could be " +
               "identified. Metadata aggregation is still possible, but " +
               "every entry in the CSV file will need a valid DOI.")

    # Interactive confirmation before the (potentially slow) aggregation.
    start = raw_input("\nStart metadata aggregation? (y/n):")
    while start not in ["y", "n"]:
        start = raw_input("Please type 'y' or 'n':")
    if start == "n":
        sys.exit()

    print "\n *** Starting metadata aggregation ***\n"
    enriched_content = []

    csv_file.seek(0)
    reader = oat.UnicodeReader(csv_file, dialect=dialect, encoding=enc)
    header_processed = False
    row_num = 0
    for row in reader:
        row_num += 1
        if not row:
            continue  # skip empty lines
        if not header_processed:
            header_processed = True
            # First output row is the canonical column-name header.
            enriched_content.append(column_map.keys())
            if has_header:
                # If the CSV file has a header, we are currently there - skip it
                # to get to the first data row
                continue
        # Optional -start/-end row-range restriction.
        if args.start and args.start > row_num:
            continue
        if args.end and args.end < row_num:
            continue
        print "---Processing line number " + str(row_num) + "---"
        enriched_row = oat.process_row(row, row_num, column_map, num_columns,
                                       doaj_offline_analysis,
                                       args.bypass_cert_verification)
        enriched_content.append(enriched_row)
    csv_file.close()

    with open('out.csv', 'w') as out:
        writer = oat.OpenAPCUnicodeWriter(out, quotemask, True, True)
        writer.write_rows(enriched_content)

    if not bufferedHandler.buffer:
        oat.print_g("Metadata enrichment successful, no errors occured")
    else:
        oat.print_r("There were errors during the enrichment process:\n")
    # closing will implicitly flush the handler and print any buffered
    # messages to stderr
    bufferedHandler.close()
def process(enc): ln = locale._build_localename((lang, enc)) yield ln nln = locale.normalize(ln) if nln != ln: yield nln
def inschrijven(request, year, month, day):
    """Show/handle the reservation form for one day.

    POST: validate and save a Reservation. Non-moderators get state
    'pending' (or 'backup' when the room is already contested) and the
    moderators are notified by mail. GET: render the empty form together
    with that day's fixed rents and existing reservations.
    """
    # Dutch month names in strftime output (messages and mails below).
    setlocale(LC_TIME, normalize('nl_NL'))
    date = datetime.date(int(year), int(month), int(day))
    ReservationForm = getReservationForm(request.user)
    if request.method == 'POST':
        r = Reservation(date=date)
        form = ReservationForm(request.POST, instance=r)
        if form.is_valid():
            form.save(commit=False)
            if not request.user.vvsuser.can_moderate:
                if room_is_free_at(r.room, date):
                    r.state = 'pending'
                    messages.success(request, "Je reservering is aangevraagd.")
                else:
                    # Room contested: put the request on the waiting list.
                    r.state = 'backup'
                    messages.warning(
                        request,
                        "Er staan ook andere reserveringen open voor de %s op %s. Je bent op de reservelijst gezet." % (r.room, r.date.strftime("%d %B %Y")))
            else:
                messages.success(request, "Reservering geplaatst.")
            r.save()
            month_url = reverse('month-view', kwargs={
                'year': year,
                'month': month
            })
            if not request.user.vvsuser.can_moderate:
                # Notify moderators about the new request.
                subj = "[Zaalrooster] Aanvraag van %s voor %s" % (
                    r.hirer, date.strftime("%d %b"))
                msg = "%s wil graag de %s op %s (%s) voor een %s.\nHuidige status: %s\n\n%s" % (
                    r.hirer, r.room, date.strftime("%d %B %Y"), r.timeframe,
                    r.name, r.getHumanState(), settings.SITE_URL + month_url)
                em = EmailMessage(subj, msg, to=settings.MODERATORS,
                                  cc=[request.user.email])
                em.send()
            return HttpResponseRedirect(month_url)
        # Invalid form falls through and is re-rendered with errors.
    else:
        form = ReservationForm()
    datehuman = date.strftime("%d %B %Y")
    # Recurring rents of the user's own groups that apply to this weekday.
    frs = list(
        FixedRent.objects.filter(
            begin__lte=date, end__gte=date, weekday=date.isoweekday(),
            hirer__in=request.user.vvsuser.cached_vvsgroups).order_by('hirer'))
    if len(frs) > 0:
        for fre in FixedRentException.objects.filter(date=date, rental__in=frs):
            # Remove the rental and re-append it flagged cancelled = True;
            # mutating it in place did not work, and this way it also ends
            # up neatly at the bottom of the list.
            frs.remove(fre.rental)
            fre.rental.cancelled = True
            frs.append(fre.rental)
    rs = list(
        Reservation.objects.filter(
            date=date, hirer__in=request.user.vvsuser.cached_vvsgroups).order_by('hirer'))
    return render_to_response('inschrijven.html', dict(
        split_date(date), **{
            'form': form,
            'datehuman': datehuman,
            'fixedrents': frs,
            'reservations': rs,
            'cancellable_states': cancellable_states
        }), context_instance=RequestContext(request))
class sbdatetime(datetime.datetime):
    """datetime subclass rendering values with SickBeard's configured formats.

    Methods are decorated with @static_or_instance so they can be called on
    an instance (``x.sbftime()``) or statically with an explicit datetime
    (``sbdatetime.sbftime(dt=x)``); ``self`` is None in the static case.
    """

    # Flipped to False permanently once switching LC_TIME to en_US fails,
    # so subsequent calls skip the locale juggling.
    has_locale = True
    # Normalized spelling of the en_US locale, used as a fallback name.
    en_US_norm = locale.normalize('en_US.utf-8')

    @static_or_instance
    def convert_to_setting(self, dt=None):
        # Convert to the display timezone only when configured to do so;
        # on any error fall back to the unconverted value.
        try:
            if sickbeard.TIMEZONE_DISPLAY == 'local':
                if self is None:
                    return dt.astimezone(sb_timezone)
                else:
                    return self.astimezone(sb_timezone)
            else:
                if self is None:
                    return dt
                else:
                    return self
        except:
            if self is None:
                return dt
            else:
                return self

    # display Time in SickRage Format
    @static_or_instance
    def sbftime(self, dt=None, show_seconds=False, t_preset=None):
        """Format the time part using t_preset or the configured TIME_PRESET(_W_SECONDS)."""
        try:
            locale.setlocale(locale.LC_TIME, '')
        except:
            pass
        # Temporarily switch LC_TIME to en_US so presets render consistently.
        try:
            if sbdatetime.has_locale:
                locale.setlocale(locale.LC_TIME, 'en_US')
        except Exception as e:
            try:
                if sbdatetime.has_locale:
                    locale.setlocale(locale.LC_TIME, sbdatetime.en_US_norm)
            except:
                sbdatetime.has_locale = False
        strt = ''
        try:
            if self is None:
                if dt is not None:
                    if t_preset is not None:
                        strt = dt.strftime(t_preset)
                    elif show_seconds:
                        strt = dt.strftime(sickbeard.TIME_PRESET_W_SECONDS)
                    else:
                        strt = dt.strftime(sickbeard.TIME_PRESET)
            else:
                if t_preset is not None:
                    strt = self.strftime(t_preset)
                elif show_seconds:
                    strt = self.strftime(sickbeard.TIME_PRESET_W_SECONDS)
                else:
                    strt = self.strftime(sickbeard.TIME_PRESET)
        finally:
            # Always restore the user's default LC_TIME.
            try:
                if sbdatetime.has_locale:
                    locale.setlocale(locale.LC_TIME, '')
            except:
                sbdatetime.has_locale = False
            return strt

    # display Date in SickRage Format
    @static_or_instance
    def sbfdate(self, dt=None, d_preset=None):
        """Format the date part using d_preset or the configured DATE_PRESET."""
        try:
            locale.setlocale(locale.LC_TIME, '')
        except:
            pass
        strd = ''
        try:
            if self is None:
                if dt is not None:
                    if d_preset is not None:
                        strd = dt.strftime(d_preset)
                    else:
                        strd = dt.strftime(sickbeard.DATE_PRESET)
            else:
                if d_preset is not None:
                    strd = self.strftime(d_preset)
                else:
                    strd = self.strftime(sickbeard.DATE_PRESET)
        finally:
            try:
                locale.setlocale(locale.LC_TIME, '')
            except:
                pass
            return strd

    # display Datetime in SickRage Format
    @static_or_instance
    def sbfdatetime(self, dt=None, show_seconds=False, d_preset=None, t_preset=None):
        """Format date + ', ' + time, using the preset arguments or the
        configured DATE/TIME presets; time is rendered under an en_US LC_TIME.
        """
        try:
            locale.setlocale(locale.LC_TIME, '')
        except:
            pass
        strd = ''
        try:
            if self is None:
                if dt is not None:
                    if d_preset is not None:
                        strd = dt.strftime(d_preset)
                    else:
                        strd = dt.strftime(sickbeard.DATE_PRESET)
                    # Switch to en_US for the time part (see sbftime).
                    try:
                        if sbdatetime.has_locale:
                            locale.setlocale(locale.LC_TIME, 'en_US')
                    except:
                        try:
                            if sbdatetime.has_locale:
                                locale.setlocale(locale.LC_TIME, sbdatetime.en_US_norm)
                        except:
                            sbdatetime.has_locale = False
                    if t_preset is not None:
                        strd += u', ' + dt.strftime(t_preset)
                    elif show_seconds:
                        strd += u', ' + dt.strftime(
                            sickbeard.TIME_PRESET_W_SECONDS)
                    else:
                        strd += u', ' + dt.strftime(sickbeard.TIME_PRESET)
            else:
                if d_preset is not None:
                    strd = self.strftime(d_preset)
                else:
                    strd = self.strftime(sickbeard.DATE_PRESET)
                # Switch to en_US for the time part (see sbftime).
                try:
                    if sbdatetime.has_locale:
                        locale.setlocale(locale.LC_TIME, 'en_US')
                except:
                    try:
                        if sbdatetime.has_locale:
                            locale.setlocale(locale.LC_TIME, sbdatetime.en_US_norm)
                    except:
                        sbdatetime.has_locale = False
                if t_preset is not None:
                    strd += u', ' + self.strftime(t_preset)
                elif show_seconds:
                    strd += u', ' + self.strftime(
                        sickbeard.TIME_PRESET_W_SECONDS)
                else:
                    strd += u', ' + self.strftime(sickbeard.TIME_PRESET)
        finally:
            try:
                if sbdatetime.has_locale:
                    locale.setlocale(locale.LC_TIME, '')
            except:
                sbdatetime.has_locale = False
            return strd
def month_view(request, year=None, month=None):
    """Render a 6-week calendar grid (Monday..Sunday) for the given month,
    one row of rooms per week, with fixed rents, their exceptions, and
    reservations filled into each day cell. Defaults to the current month.
    """
    # Dutch month/day names in strftime output.
    setlocale(LC_TIME, normalize('nl_NL'))
    today = datetime.date.today()
    if year is None:
        year = today.year
    else:
        year = int(year)
    if month is None:
        month = today.month
    else:
        month = int(month)
    firstDOM = datetime.date(year, month, 1)
    monthhuman = firstDOM.strftime("%B")
    # First day shown In Calendar: the Monday of the week containing the 1st
    # (or the Monday of the previous week when the 1st itself is a Monday).
    firstDIC = firstDOM - datetime.timedelta(
        days=(firstDOM.isoweekday() + 5) % 7 + 1)
    assert (firstDIC.isoweekday() == 1)
    # Last day shown: 6 full weeks later, a Sunday.
    lastDIC = firstDIC + datetime.timedelta(days=6 * 7 - 1)
    assert (lastDIC.isoweekday() == 7)
    rooms = dict(map(lambda x: (x.id, x.name), Room.objects.all()))
    can_subscribe = len(request.user.vvsuser.cached_vvsgroups) > 0
    # dmap[date][room_id] holds the per-cell lists; the same list objects are
    # shared with `table`, so filling dmap below also fills the table.
    dmap = {}
    table = []
    dt = firstDIC
    for w in xrange(6):
        roomrows = []
        rowdates = []
        for d in xrange(7):
            dmap[dt] = {}
            for room in rooms:
                dmap[dt][room] = {
                    'fixedRent': [],
                    'fixedRentExceptions': [],
                    'reservations': []
                }
            rowdates.append(dt)
            dt += datetime.timedelta(days=1)
        for room in rooms:
            roomrow = {'room': rooms[room], 'id': room, 'days': []}
            for rd in rowdates:
                roomrow['days'].append({
                    'date': rd.strftime("%d %b"),
                    # Subscription link only for future dates and members.
                    'subscribeURL': reverse('inschrijven', kwargs=split_date(rd)) if can_subscribe and rd >= today else None,
                    'activeMonth': (rd.month == month),
                    'fixedRent': dmap[rd][room]['fixedRent'],
                    'fixedRentExceptions': dmap[rd][room]['fixedRentExceptions'],
                    'reservations': dmap[rd][room]['reservations'],
                })
            roomrows.append(roomrow)
        table.append(roomrows)
    # Expand each recurring rent onto the matching weekday of all 6 weeks.
    for fr in FixedRent.objects.filter(begin__lte=lastDIC, end__gte=firstDIC):
        dt = firstDIC + datetime.timedelta(days=fr.weekday - 1)
        assert (dt.isoweekday() == fr.weekday)
        for i in xrange(6):
            if fr.is_active_at(dt):
                dmap[dt][fr.room.id]['fixedRent'].append(fr)
            dt += datetime.timedelta(days=7)
    for fre in FixedRentException.objects.filter(date__range=(firstDIC, lastDIC)):
        dmap[fre.date][fre.rental.room.id]['fixedRentExceptions'].append(fre)
        try:
            dmap[fre.date][fre.rental.room.id]['fixedRent'].remove(fre.rental)
        except ValueError:
            # An exception without a matching fixed rent on that day — ignore.
            pass
    for r in Reservation.objects.filter(
            date__range=(firstDIC, lastDIC)).order_by('state'):
        dmap[r.date][r.room.id]['reservations'].append(r)
    # Previous/next month navigation links, wrapping over year boundaries.
    if month == 1:
        prev = reverse('month-view', kwargs={'year': year - 1, 'month': "12"})
    else:
        prev = reverse('month-view', kwargs={
            'year': year,
            'month': "%02d" % (month - 1)
        })
    if month == 12:
        next = reverse('month-view', kwargs={'year': year + 1, 'month': "01"})
    else:
        next = reverse('month-view', kwargs={
            'year': year,
            'month': "%02d" % (month + 1)
        })
    return render_to_response('month.html', {
        'year': year,
        'month': month,
        'monthhuman': monthhuman,
        'rooms': rooms,
        'table': table,
        'next': next,
        'prev': prev
    }, context_instance=RequestContext(request))