Example #1
0
def init():
	"""Install the application's gettext translation and locale.

	Idempotent: the module-level __installed__ flag ensures the work is
	done only once.  Sets the globals __enabled__, __installed__ and
	__translation__ as side effects.
	"""
	global __enabled__
	global __installed__
	global __translation__

	if not __installed__:
		try:
			# Adopt the user's default locale for all categories.
			locale.setlocale(locale.LC_ALL, "")
		except locale.Error:
			# Unsupported locale: fall back to untranslated messages.
			__translation__ = gettext.NullTranslations()
		else:
			lang = locale.getlocale()

			#Fix for non-POSIX-compliant systems (Windows et al.).
			if os.getenv('LANG') is None:
				lang = locale.getdefaultlocale()
				os.environ['LANG'] = lang[0]

			# Catalog is looked up by two-letter language code, e.g.
			# translations/messages_de.mo.
			# NOTE(review): lang[0] can be None on some platforms, which
			# would raise TypeError on the slice below -- confirm.
			filename = basedir.get_basedir() + "/translations/messages_%s.mo" % lang[0][0:2]

			try:
				__translation__ = gettext.GNUTranslations(open(filename, "rb"))
			except IOError:
				# No catalog for this language: identity translation.
				__translation__ = gettext.NullTranslations()

		__enabled__ = True
		__installed__ = True
		# install(True) binds _() as a builtin (unicode variant).
		__translation__.install(True)
Example #2
0
 def __init__(self, *a, **kw):
     """Switch the test case to the pl_PL locale, or skip it.

     The previous locale is kept in self.oldlocale so teardown code can
     restore it.
     """
     super(TranslatedBuildTest, self).__init__(*a, **kw)
     try:
         self.oldlocale = locale.getlocale()
         locale.setlocale(locale.LC_ALL, ("pl_PL", "utf8"))
     except locale.Error:
         # Narrowed from a bare ``except`` so KeyboardInterrupt /
         # SystemExit are no longer swallowed; only a missing locale
         # causes the skip.
         pytest.skip("pl_PL locale not available")
Example #3
0
	def parse(self, filename):
		"""Parse a semicolon-separated network export into self.networks.

		The file's first line is a header; each following line becomes
		an entry keyed by BSSID.  LC_TIME is forced to "C" so the time
		strings parse with English month names, and is restored even if
		parsing fails (the original leaked both the locale setting and
		the open file on exceptions).
		"""
		locale.setlocale(locale.LC_TIME, 'C')
		try:
			# ``with`` guarantees the file is closed on error.
			with open(filename) as f:
				# A trailing ';' leaves an empty last field -> [:-1].
				head = f.readline().split(";")[:-1]
				for line in f:
					data = dict(zip(head, line.split(";")[:-1]))

					# Normalise e.g. "WPA-PSK" -> "wpa_psk".
					crypts = [crypt.lower().replace("-", "_")
					          for crypt in data["Encryption"].split(",")]

					self.networks[data["BSSID"]] = {
						"type": data["NetType"],
						"channel": int(data["Channel"]),
						"firsttime": timestring2timestamp(data["FirstTime"]),
						"lasttime": timestring2timestamp(data["LastTime"]),
						"lat": float(data["GPSBestLat"]),
						"lon": float(data["GPSBestLon"]),
						"manuf": "",
						"ssid": data["ESSID"],
						"cryptset": encode_cryptset(crypts)
					}
		finally:
			# Always restore the user's LC_TIME setting.
			locale.setlocale(locale.LC_TIME, '')
Example #4
0
def set_iface_lang():
    """Pick the display language for the current request.

    Precedence: host language (GET/host) over the user's preference;
    g.lang_override may pre-empt plain English.  Falls back to g.lang
    when no translation exists.  Also sets the process locale so large
    numbers format with grouping -- note this is process-global.
    """
    # TODO: internationalize.  This seems the best place to put this
    # (used for formatting of large numbers to break them up with ",").
    # unfortunately, not directly compatible with gettext
    locale.setlocale(locale.LC_ALL, g.locale)
    lang = [g.lang]
    # GET param wins
    if c.host_lang:
        lang = [c.host_lang]
    else:
        lang = [c.user.pref_lang]

    # BUG FIX: getattr() without a default raises AttributeError when g
    # has no lang_override attribute; supply None so the check is then
    # simply false.
    if getattr(g, "lang_override", None) and lang[0] == "en":
        lang.insert(0, g.lang_override)

    #choose the first language
    c.lang = lang[0]

    #then try to overwrite it if we have the translation for another
    #one
    for l in lang:
        try:
            h.set_lang(l, fallback_lang=g.lang)
            c.lang = l
            break
        except h.LanguageError:
            #we don't have a translation for that language
            h.set_lang(g.lang, graceful_fail = True)

    #TODO: add exceptions here for rtl languages
    if c.lang in ('ar', 'he', 'fa'):
        c.lang_rtl = True
Example #5
0
def main():
    """Entry point when called on the command-line.
    """
    # Honour the user's locale settings (number formatting etc.).
    locale.setlocale(locale.LC_ALL, '')

    parser = argparse.ArgumentParser(
        description="api_stats records the history of some data retrieved "
                    "from an API")
    parser.add_argument('-v', '--verbose', action='count', default=1,
                        dest='verbosity',
                        help="augments verbosity level")
    parser.add_argument('-n', '--dry-run', action='store_true',
                        help="don't update the data on disk")
    parser.add_argument('configuration', help="configuration file to process")
    parser.add_argument('data', nargs=argparse.OPTIONAL,
                        help="file to update with the new data")

    args = parser.parse_args()

    # A data file is mandatory unless this is a dry run.
    missing_data_file = args.data is None and not args.dry_run
    if missing_data_file:
        parser.error("No data file specified (or do you mean to use "
                     "--dry-run?)")
        raise RuntimeError  # unreachable

    setup_logging(args.verbosity)

    # Dry runs process the configuration without a target file.
    target = None if args.dry_run else args.data
    process_configuration(target, args.configuration)
Example #6
0
def dutch_strptime(date, pattern):
     loc = locale.getlocale()
     locale.setlocale(locale.LC_ALL, 'nl_NL.UTF-8')
     try:
         return datetime.datetime.strptime(date, pattern)
     finally:
         locale.setlocale(locale.LC_ALL, loc)
 def setUp(self):
     """Force a deterministic English/C time locale for the tests."""
     try:
         # Jython: set the JVM-wide default locale.
         import java
         java.util.Locale.setDefault(java.util.Locale.US)
     except ImportError:
         # CPython: the C locale yields English month/day names.
         import locale
         locale.setlocale(locale.LC_TIME, 'C')
Example #8
0
def main ():
	"""Install gettext translations for the configured language.

	Reads the language code from impostazioni ("en", "fr"; anything
	else means the Italian default) and installs the matching catalog,
	also exporting LANG / LC_MESSAGES so C libraries agree.  On failure
	_() is bound to the identity gettext.gettext.
	"""
	APP = 'acqua'
	if os.name == 'nt':
		DIR = os.path.join (utils.DHOME_DIR, "locale")
	else:
		DIR = os.path.join (utils.PROG_DIR, "locale")
	
	try:
		lang = impostazioni.get ("lang").lower ()
		# Map UI language -> (gettext catalog list, POSIX locale code).
		# This replaces two copy-pasted branches for "en" and "fr".
		codes = {"en": (["en"], "en_US"), "fr": (["fr"], "fr_FR")}
		if lang in codes:
			languages, code = codes[lang]
			translation = gettext.translation (APP, DIR, languages)
			translation.install ()
			try:
				os.environ['LANG'] = code
				locale.setlocale (locale.LC_MESSAGES, code)
			except locale.Error: pass
		else:
			# In theory setting here is pointless: Italian is the
			# default without gettext.
			os.environ['LANG'] = "it_IT"
			it = gettext.translation (APP, DIR, [])
			it.install ()

	except (IOError, locale.Error), e:
		print "(%s): WARNING **: %s" % (APP, e)
		__builtins__.__dict__["_"] = gettext.gettext
Example #9
0
 def beforeDrawPage(self, canvas, doc):
     """Draw the title page decorations: optional footer and cover image.

     Called by the platypus framework before each title page is drawn.
     """
     canvas.setFont(serif_font, 8)
     canvas.saveState()
     if pdfstyles.show_title_page_footer:
         # Horizontal rule above the footer text.
         canvas.line(footer_margin_hor, footer_margin_vert, page_width - footer_margin_hor, footer_margin_vert)
         footertext = [_(titlepagefooter)]
         if pdfstyles.show_creation_date:
             # Default locale so strftime() uses the user's language.
             locale.setlocale(locale.LC_ALL, "")
             footertext.append(
                 pdfstyles.creation_date_txt % time.strftime(pdfstyles.creation_date_format, time.localtime())
             )
         lines = [formatter.cleanText(line, escape=False) for line in footertext]
         # Ensure every line is unicode before joining (Python 2).
         txt = "<br/>".join(line if isinstance(line, unicode) else unicode(line, "utf-8") for line in lines)
         p = Paragraph(txt, text_style(mode="footer"))
         w, h = p.wrap(print_width, print_height)
         # Centre horizontally; sit just below the rule.
         canvas.translate((page_width - w) / 2.0, footer_margin_vert - h - 0.25 * cm)
         p.canv = canvas
         p.draw()
     canvas.restoreState()
     if self.cover:
         width, height = self._scale_img(pdfstyles.title_page_image_size, self.cover)
         # None in either position coordinate means "centre on page";
         # explicit positions are clamped inside the page.
         if pdfstyles.title_page_image_pos[0] is None:
             x = (page_width - width) / 2.0
         else:
             x = max(0, min(page_width - width, pdfstyles.title_page_image_pos[0]))
         if pdfstyles.title_page_image_pos[1] is None:
             y = (page_height - height) / 2.0
         else:
             y = max(0, min(page_height - height, pdfstyles.title_page_image_pos[1]))
         canvas.drawImage(self.cover, x, y, width, height)
Example #10
0
	def activateLanguage(self, index):
		"""Activate the language catalog for ``index`` (e.g. "de_DE").

		Unknown indices fall back to en_EN.  Registered callbacks are
		notified, then each locale category is set individually and the
		LC_TIME/LANGUAGE environment variables are forced to match.
		"""
		try:
			if index not in self.lang:
				print "Selected language %s does not exist, fallback to en_EN!" % index
				index = "en_EN"
			lang = self.lang[index]
			print "Activating language " + lang[0]
			# fallback=True: missing catalogs yield untranslated text
			# instead of raising.
			self.catalog = gettext.translation('enigma2', resolveFilename(SCOPE_LANGUAGE, ""), languages=[index], fallback=True)
			self.catalog.install(names=("ngettext", "pgettext"))
			self.activeLanguage = index
			for x in self.callbacks:
				x()
		except:
			print "Error in activating language!"
		# NOTE: we do not use LC_ALL, because LC_ALL will not set any of the categories, when one of the categories fails.
		# We'd rather try to set all available categories, and ignore the others
		for category in [locale.LC_CTYPE, locale.LC_COLLATE, locale.LC_TIME, locale.LC_MONETARY, locale.LC_MESSAGES, locale.LC_NUMERIC]:
			try:
				locale.setlocale(category, (self.getLanguage(), 'UTF-8'))
			except:
				pass
		# HACK: sometimes python 2.7 reverts to the LC_TIME environment value, so make sure it has the correct value
		os.environ["LC_TIME"] = self.getLanguage() + '.UTF-8'
		os.environ["LANGUAGE"] = self.getLanguage() + '.UTF-8'
		os.environ["GST_SUBTITLE_ENCODING"] = self.getGStreamerSubtitleEncoding()
def bindtextdomain(app_name, locale_dir=None):
    """
    Bind the domain represented by app_name to the locale directory locale_dir.
    It has the effect of loading translations, enabling applications for different
    languages.

    app_name:
        a domain to look for translations, typically the name of an application.

    locale_dir:
        a directory with locales like locale_dir/lang_isocode/LC_MESSAGES/app_name.mo
        If omitted or None, then the current binding for app_name is used.
    """    
    try:
        import locale
        import gettext
        locale.setlocale(locale.LC_ALL, "")
        gtk.glade.bindtextdomain(app_name, locale_dir)
        gettext.install(app_name, locale_dir, unicode=1)
    except (IOError,locale.Error), e:
        # The user's locale is unusable: force English as the default.
        try:
            os.environ["LANGUAGE"] = "en_US.UTF-8"
            locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
            gtk.glade.bindtextdomain(app_name, locale_dir)
            gettext.install(app_name, locale_dir, unicode=1)
            return
        except:
            # English failed too: bind _() to the identity function so
            # strings pass through untranslated.
            try:
                __builtins__.__dict__["_"] = lambda x : x        
            except:
                # __builtins__ may be a dict rather than a module.
                __builtins__["_"] = lambda x : x
Example #12
0
 def run(self):
     """Determine the terminal encoding, then run the curses UI."""
     # Get the locale encoding (setlocale('') must precede
     # getpreferredencoding() for it to reflect the environment).
     locale.setlocale(locale.LC_ALL, '')
     self._code = locale.getpreferredencoding()

     # Start the curses main loop; wrapper() restores the terminal on exit.
     curses.wrapper(self._curses_main)
Example #13
0
 def process_item(self, item, spider):
     """Normalise a scraped item and insert it into the pika table.

     - tome: strip the leading "Tome ..." prefix, or drop the item;
     - release_date: parse the French "Sortie ..." date (LC_TIME is set
       to fr_FR so %B matches French month names);
     - name: remove trailing volume markers like " T01";
     - cover: blank out placeholder ("provisoire") covers.
     """
     locale.setlocale(locale.LC_TIME, "fr_FR")
     if item['tome']:
         for x in item['tome']:
             if 'Tome' in x:
                 item['tome'] = [x[9:]]
             else:
                 raise DropItem("Missing price in %s" % item)
     if item['release_date']:
         for x in item['release_date']:
             if 'Sortie' in x:
                 x = x.encode('utf8')
                 new_date = unicode(str(datetime.date(datetime.strptime(x[10:] ,'%d %B %Y'))))
                 item['release_date'] = [new_date]
     if item['name']:
         for x in item['name']:
             item['name'] = [re.sub(r'\s[tT]\d{2}', '', x)]
     if item['cover']:
         for x in item['cover']:
             if 'provisoire' in x:
                 item['cover'] = ['']
             else:
                 item['cover'] = [x[:-14]]
     # Open the connection only once the item is known valid, and make
     # sure it is always closed (the original leaked it whenever
     # DropItem or a database error was raised).
     conn = psycopg2.connect("dbname=mydb")
     try:
         cur = conn.cursor()
         cur.execute("INSERT INTO pika (name, cover, collection, tome, release_date) VALUES (%s, %s, %s, %s, %s)", (item['name'][0], item['cover'][0], item['collection'][0], item['tome'][0], item['release_date'][0]))
         conn.commit()
         cur.close()
     finally:
         conn.close()
     return item
Example #14
0
def one_time_setup():
    """Apply the shop locale configured in settings.LFS_LOCALE, if any.

    An unsupported locale is logged rather than raised so startup can
    continue with the default locale.
    """
    lfs_locale = getattr(settings, "LFS_LOCALE", None)
    if lfs_locale:
        try:
            locale.setlocale(locale.LC_ALL, lfs_locale)
        except locale.Error, e:
            # Include the underlying error (previously captured as ``e``
            # but never used) to ease debugging.
            logger.error("Unsupported locale in settings.LFS_LOCALE: '%s' (%s)." % (lfs_locale, e))
Example #15
0
        def inner(*args, **kwds):
            """Run ``func`` under the first of ``locales`` that can be set
            for the category named by ``catstr``, restoring the original
            locale afterwards.  (``func``, ``catstr`` and ``locales`` are
            closure variables from the enclosing decorator.)
            """
            try:
                import locale
                category = getattr(locale, catstr)
                # setlocale() with no second argument only queries.
                orig_locale = locale.setlocale(category)
            except AttributeError:
                # if the test author gives us an invalid category string
                raise
            except:
                # cannot retrieve original locale, so do nothing
                # (note: this deliberately shadows the ``locale`` module
                # so the finally-clause check below short-circuits)
                locale = orig_locale = None
            else:
                # Try each candidate locale in order; first one wins.
                for loc in locales:
                    try:
                        locale.setlocale(category, loc)
                        break
                    except:
                        pass

            # now run the function, resetting the locale on exceptions
            try:
                return func(*args, **kwds)
            finally:
                if locale and orig_locale:
                    locale.setlocale(category, orig_locale)
def create_monitor_figure(post_id, datatype, monitor_database, title):
  """Build a time-series figure for one post from the monitor database.

  datatype selects the series: "shares" plots VK/FB/Twitter counts,
  "pageview" plots views, "favorites" plots favourites.  Returns the
  URL of the generated figure.

  NOTE(review): an unrecognised datatype other than "shares" leaves
  ``field`` unbound and raises NameError below -- confirm callers only
  pass the three known values.
  """
  locale.setlocale(locale.LC_ALL, 'en_US.utf8')
  post_id = int(post_id)
  data = monitor_database.find({"post_id":post_id}).sort("overall_seconds",1)
  x = []
  if datatype == "shares":
    vk, fb, tw = [],[],[]
    vk_f, fb_f, tw_f  = "vkontakte_data","facebook_data", "twitter_data"
    for datum in data:
      x.append(datetime(datum["year"], datum["month"], datum["day"], datum["hour"], datum["minute"]))
      vk.append(datum[vk_f]); fb.append(datum[fb_f]); tw.append(datum[tw_f])
    # Shift timestamps by +4h (presumably UTC -> Moscow time; verify).
    x = [e + timedelta(hours=4) for e in x]
    fig_url = visualize_shares_post(x,vk,fb,tw,post_id,title)
  else:
    y = []
    if datatype == "pageview":
      field = "views"
    if datatype == "favorites":
      field = "favorite"
    for datum in data:
      x.append(datetime(datum["year"], datum["month"], datum["day"], datum["hour"], datum["minute"]))
      y.append(datum[field])
    x = [e + timedelta(hours=4) for e in x]
    fig_url = visualize_post_plotly(x,y,field, post_id, title)
  return fig_url
Example #17
0
File: bid.py Project: rdw88/Joblet
def accept(args):
	"""Accept the bid identified by args['bid_id'].

	Marks the bid accepted, records the listing in the bidder's recent
	jobs (bounded to NUM_RECENT_JOBS), updates the listing's current /
	last-accepted bid, and notifies the bidder.  Returns (ok, error).

	NOTE(review): fields are written via ``__dict__`` rather than normal
	attribute assignment -- presumably to bypass model descriptors;
	verify this is intentional.
	"""
	bid = Bid.objects.get(bid_id=args['bid_id'])
	bid.__dict__['status'] = 1
	bid.save()
	bidder_profile = None

	try:
		bidder_profile = Profile.objects.get(email=bid.bidder_email)
	except Profile.DoesNotExist:
		return False, ERROR_NO_SUCH_PROFILE

	recent_jobs = json.loads(bidder_profile.recent_jobs)

	# Keep the recent-jobs list bounded: drop the oldest entry.
	if len(recent_jobs) == NUM_RECENT_JOBS:
		del recent_jobs[0]

	recent_jobs.append(bid.listing_id)
	bidder_profile.__dict__['recent_jobs'] = json.dumps(recent_jobs)
	bidder_profile.save()

	listing = Listing.objects.get(listing_id=bid.listing_id)
	listing.__dict__['current_bid'] = bid.amount
	listing.__dict__['last_accepted_bid'] = bid.bid_id
	listing.save()

	# Default locale so locale.currency() formats with the user's
	# currency symbol and separators.
	locale.setlocale(locale.LC_ALL, '')
	notification_title = 'Bid Accepted!'
	notification_description = 'Your bid of %s for %s was accepted!' % (locale.currency(float(bid.amount)), listing.job_title)

	notification.create(notification_title, notification_description, bidder_profile.email, bidder_profile.password, None)

	return True, None
Example #18
0
def teardown_package():
    """Package-level test teardown: restore locale, delete test data,
    and shut down any in-process test servers."""
    global orig_collate
    # Restore the collation locale saved by the package setup.
    locale.setlocale(locale.LC_COLLATE, orig_collate)

    # clean up containers and objects left behind after running tests
    global config

    if config:
        conn = Connection(config)
        conn.authenticate()
        account = Account(conn, config.get('account', config['username']))
        account.delete_containers()

    global in_process
    global _test_socks
    if in_process:
        # Best-effort shutdown: failures here must not fail the suite.
        try:
            for i, server in enumerate(_test_coros):
                server.kill()
                if not server.dead:
                    # kill it from the socket level
                    _test_socks[i].close()
        except Exception:
            pass
        try:
            rmtree(os.path.dirname(_testdir))
        except Exception:
            pass

        reset_globals()
def visualize_shares_post(dates,vk,fb,tw, post_id, title):
  """Plot VK/FB/Twitter share counts over time and publish to Plotly.

  Returns the unique URL of the published figure.
  """
  locale.setlocale(locale.LC_ALL, 'en_US.utf8')
  py.sign_in('SergeyParamonov', os.environ.get("PLOTLY_KEY_API"))

  # One labelled line+marker trace per social network.
  series = [(vk, u"Вконтакте"), (fb, u"Facebook"), (tw, u"Twitter")]
  traces = [Scatter(x=dates, y=values, mode='lines+markers', name=label)
            for values, label in series]

  layout = Layout(title=u"Репосты: " + title,
      xaxis= XAxis(title=u"Московское время"), # x-axis title
      yaxis= YAxis(title=u"Репосты"), # y-axis title
      hovermode='closest', # N.B hover -> closest data pt
  )
  figure = Figure(data=Data(traces), layout=layout)
  plot_name = "monitor_post_id_" + str(post_id) + "_" + "shares"
  return py.plot(figure, filename=plot_name)
Example #20
0
    def __init__(self, redirect = False):
        """Initializes the application."""
        wx.App.__init__(self, redirect = redirect)
        # Default locale for number/date formatting in the UI.
        locale.setlocale(locale.LC_ALL, '')
        self.stories = []
        self.loadPrefs()
        self.determinePaths()
        self.loadTargetHeaders()

        # try to load our app icon
        # if it doesn't work, we continue anyway

        self.icon = wx.EmptyIcon()

        try:
            self.icon = wx.Icon(self.iconsPath + 'app.ico', wx.BITMAP_TYPE_ICO)
        except:
            pass


        # restore save location; fall back to the home directory when
        # the saved path is missing or invalid

        try:
            os.chdir(self.config.Read('savePath'))
        except:
            os.chdir(os.path.expanduser('~'))

        # reopen the last file unless a file was passed on startup
        if not self.openOnStartup():
            if self.config.HasEntry('LastFile') \
            and os.path.exists(self.config.Read('LastFile')):
                self.open(self.config.Read('LastFile'))
            else:
                self.newStory()
Example #21
0
def set_locale(new_locale, lc_var=locale.LC_ALL):
    """Context manager for temporarily setting a locale.

    Parameters
    ----------
    new_locale : str or tuple
        A string of the form <language_country>.<encoding>. For example to set
        the current locale to US English with a UTF8 encoding, you would pass
        "en_US.UTF-8".
    lc_var : int, default locale.LC_ALL
        The locale category to set (one of the locale.LC_* constants).

    Yields
    ------
    str
        The normalized locale string actually in effect, or ``new_locale``
        when normalization is not possible.

    Notes
    -----
    This is useful when you want to run a particular block of code under a
    particular locale, without globally setting the locale. This probably isn't
    thread-safe.  (This is a generator function -- presumably decorated with
    ``contextlib.contextmanager`` at the definition site; verify.)
    """
    current_locale = locale.getlocale()

    try:
        locale.setlocale(lc_var, new_locale)

        try:
            # getlocale() may raise ValueError for unparsable locales.
            normalized_locale = locale.getlocale()
        except ValueError:
            yield new_locale
        else:
            if all(lc is not None for lc in normalized_locale):
                yield '.'.join(normalized_locale)
            else:
                yield new_locale
    finally:
        # Restore whatever was in effect before entering the block.
        locale.setlocale(lc_var, current_locale)
Example #22
0
def main():
    """Configure logging and locale, parse the command line, and build
    the requested packages."""
    logging.basicConfig(level=logging.DEBUG)

    locale_name = "en_US.UTF-8"
    logging.info("Setting locale to %s" % locale_name)
    locale.setlocale(locale.LC_ALL, locale_name)

    argument_parser = build_argument_parser(sys.argv)
    args = argument_parser.parse_args()

    config = read_config(args.configuration_file)
    packages_dir = os.path.join(args.work_dir, args.package_type)
    checkout_dir = os.path.dirname(os.path.abspath("."))
    available_packages = read_packages(config, checkout_dir)

    requested = set(args.packages)
    assert_package_option_is_correct(argument_parser.format_usage(), requested, available_packages)

    # "all" selects everything; otherwise filter by name.
    if "all" in requested:
        selected = list(available_packages)
    else:
        selected = [pkg for pkg in available_packages if pkg.name in requested]

    for packager in make_packagers(config, args, packages_dir, checkout_dir, selected):
        packager.prepare_build()
        packager.build()
Example #23
0
def index(request):
    """Render the landing page with the count of MetaLex regulations.

    The count is taken from the *_ml.xml files on disk, falling back to
    a representative default when none are found, and is formatted with
    thousands grouping.
    """
    metalex_files = glob.glob(FILES_DIR + "*_ml.xml")
    r_count = len(metalex_files)
    if r_count == 0 :
        r_count = 27000
    
    # Narrowed from a bare ``except`` so only a missing locale triggers
    # the fallback; other errors would otherwise be hidden here.
    try :
        locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
    except locale.Error :
        locale.setlocale(locale.LC_ALL, 'en_US')
        
    # locale.format() is deprecated (removed in Python 3.12);
    # format_string() yields the same result for this pattern.
    regulations_count = locale.format_string("%d", r_count, grouping=True)
    
    form = QueryForm()
    
    t = get_template('index.html')
    html = t.render(RequestContext(request, { 'regulations': regulations_count, 'form': form}))
    return HttpResponse(html)
    def __init__(self, player_sub):
        """Set up the curses display for the given player submarine."""
        self.player_sub = player_sub
        # self.sea = sea
        self.display_screen = ''

        # setlocale enables UTF chars
        # see: https://docs.python.org/2/library/curses.html
        locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
        self.strcode = locale.getpreferredencoding()
        # print(self.strcode)
        screen = curses.initscr()

        self.screen = screen

        # Applications will also commonly need to react to keys instantly, without requiring the Enter key
        # to be pressed; this is called cbreak mode, as opposed to the usual buffered input mode.
        curses.cbreak()

        # Usually curses applications turn off automatic echoing of keys to the screen, in order to be able to read
        #  keys and only display them under certain circumstances. This requires calling the noecho() function.
        curses.noecho()

        # Non-blocking getch(): the game loop polls for input.
        screen.nodelay(1)

        # Hide the cursor and enable keypad escape-sequence translation.
        curses.curs_set(0)
        screen.keypad(True)

        curses.start_color()
        curses.use_default_colors()
def main():
    """main funtion, called from within the curses.wrapper"""

    # Wire up config, credentials and the exchange client.
    config = goxapi.GoxConfig("goxtool.ini")
    secret = goxapi.Secret()
    secret.prompt_decrypt()
    gox = goxapi.Gox(secret, config)

    logwriter = LogWriter(gox)
    gox.start()
    try:
        # Idle loop: all real work happens in gox's own threads.
        while True:
            time.sleep(1)
    except KeyboardInterrupt as e:
        print "got Ctrl+C, trying to shut down cleanly."
    except Exception:
        gox.debug(traceback.format_exc())

    #gox.stop()
    #logwriter.close()
    # The End.

    # NOTE(review): this locale loop runs only after the idle loop above
    # exits -- presumably it was meant to run before startup; verify.
    # It tries English locales in preference order, settling for "C".
    for loc in ["en_US.UTF8", "en_GB.UTF8", "en_EN", "en_GB", "C"]:
        try:
            locale.setlocale(locale.LC_NUMERIC, loc)
            break
        except locale.Error:
            continue
Example #26
0
def setlocale(name):
    """Context manager for changing the current locale"""
    # Remember the current LC_ALL setting exactly as setlocale() itself
    # reports it, so it can be restored verbatim afterwards.
    previous = locale.setlocale(locale.LC_ALL)
    try:
        # Activate the requested locale and hand its name to the caller.
        yield locale.setlocale(locale.LC_ALL, name)
    finally:
        locale.setlocale(locale.LC_ALL, previous)
Example #27
0
 def setUp(self):
     """Create temp output/cache dirs and pin the locale to C so
     generated output is byte-for-byte reproducible."""
     super(TestPelican, self).setUp()
     self.temp_path = mkdtemp(prefix='pelicantests.')
     self.temp_cache = mkdtemp(prefix='pelican_cache.')
     self.maxDiff = None
     # Save the current locale string for restoration in tearDown.
     self.old_locale = locale.setlocale(locale.LC_ALL)
     locale.setlocale(locale.LC_ALL, str('C'))
Example #28
0
    def __init__(self, parent, windowparent, manager, managers = []):
        """Build the inventory admin window: column layout, search
        fields and the add-product dialog wiring."""
        BaseGUI.__init__(self, parent, manager, managers)

        self.FILENAME = 'inventario/admin.ui'
        self.DialogAddClass  = AddProducto

        # Default locale so prices display with local number formatting.
        locale.setlocale( locale.LC_ALL, '' )
        self.setWindowIcon(QtGui.QIcon(':/newPrefix/logo.png'))
        # One alignment code per list column (C=center, L=left).
        self.ALINEACIONLISTA = ['C','L','L','C','C','C','C','C']
        # Searchable attributes offered in the search combo box.
        self.ATRI_COMBO_BUSQUEDA = [
        {u'Descripcion del producto':Producto.descripcion},
        {u'Codigo':Producto.codigo},
        ]
        # Column header -> model attribute mapping for the product list.
        self.ATRIBUTOSLISTA = [
        {u'Codigo':Producto.codigo},
        {u'Categoria':Producto.categoria},
        {u'Descripcion del producto':Producto.descripcion},
        {u'Precio costo':Producto.precio_costo},
        {u'Precio venta':Producto.precio_venta},
        {u'Existencia':Producto.cantidad},
        {u'Inv. Mínimo':Producto.minimo},
        {u'Usa inventario':Producto.usa_inventario},
        ]
        self.windowparent = windowparent
        self._operaciones_de_inicio()
        self.windowparent.hide()
Example #29
0
    def __init__ (self, redirect = False):
        """Initializes the application."""
        wx.App.__init__(self, redirect = redirect)
        # Default locale for number/date formatting in the UI.
        locale.setlocale(locale.LC_ALL, '')
        self.stories = []
        self.loadPrefs()

        # try to load our app icon under win32
        # if it doesn't work, we continue anyway

        self.icon = wx.EmptyIcon()
        
        if sys.platform == 'win32':
            try:
                self.icon = wx.Icon('icons' + os.sep + 'app.ico', wx.BITMAP_TYPE_ICO)
            except:
                pass
        
        # recent files
        # each of our StoryFrames shares the same menu
        
        self.recentFiles = wx.FileHistory(App.RECENT_FILES)
        self.recentFiles.Load(self.config)
        
        # restore save location
        # NOTE(review): unlike the sibling __init__ above, this chdir is
        # unguarded and raises if savePath is missing -- confirm intended.

        os.chdir(self.config.Read('savePath'))
                   
        self.newStory()
Example #30
0
def get_locale_currency_symbol():
    """Return the currency symbol of the user's default locale.

    Note: as a side effect this sets the process-wide locale from the
    environment (required before localeconv() reflects it).
    """
    import locale
    locale.setlocale(locale.LC_ALL, '')
    return locale.localeconv()['currency_symbol']
Example #31
0
import asyncio
from itertools import cycle
import shutil
import typing
import random
from random import *
import time
import sqlite3
import json
import hashlib
import locale
import pygame

pygame.init()

# French day/month names for strftime.  'FR' is a Windows-style locale
# alias and raises locale.Error on POSIX systems (there it would be
# e.g. 'fr_FR.UTF-8') -- TODO confirm target platform is Windows.
locale.setlocale(locale.LC_TIME, 'FR')

print(time.strftime('%A %d/%m/%Y %H:%M:%S'))

# NOTE(review): `os` is used below but absent from the visible import
# block -- presumably imported elsewhere; verify.
DIR = os.path.dirname(__file__)

os.chdir(r'C:\Users\33664\Desktop\B-Warfare')

# Rotating bot status messages (advanced elsewhere via next()).
status = cycle(
    ['InterliX', 'v 2.01.16.8', 'RPG', '!help',
     time.strftime('%A %d/%m/%Y')])

# NOTE(review): `commands` (discord.ext.commands) is not in the visible
# imports either -- verify.
client = commands.Bot(command_prefix="!")

db = sqlite3.connect(os.path.join(DIR, "BankAccounts.db"))
Example #32
0
#!/Users/bmcnamar/code/bmcnamar-amazon/sam-projects/venv/bin/python

# $Id: rst2pseudoxml.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <*****@*****.**>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing pseudo-XML.
"""

try:
    import locale
    locale.setlocale(locale.LC_ALL, '')
except:
    # Best effort: docutils runs fine without a usable locale.
    pass

from docutils.core import publish_cmdline, default_description

description = ('Generates pseudo-XML from standalone reStructuredText '
               'sources (for testing purposes).  ' + default_description)

# Parse sys.argv and run the Docutils publisher for this front end.
publish_cmdline(description=description)
Example #33
0
 def __init__(self, wunder_slo):
     """Store the data source and force Polish LC_TIME so strftime /
     strptime use Polish day and month names.

     NOTE(review): this changes the process-wide locale and never
     restores it -- confirm that is acceptable for callers.
     """
     locale.setlocale(locale.LC_TIME, "pl_PL.utf8")
     self.wunder_slo = wunder_slo
Example #34
0
    sys.exit(1)

## Apply global fixes:

# the following line is to fix python-zsi 2.0 and thus lyrics in ubuntu:
# https://bugs.launchpad.net/ubuntu/+source/zsi/+bug/208855
sys.path.append('/usr/lib/python2.5/site-packages/oldxml')

# hint for gnome.init to set the process name to 'sonata'
if platform.system() == 'Linux':
    sys.argv[0] = 'sonata'

# This is needed so that python-mpd correctly returns lowercase
# keys for, e.g., playlistinfo() with a turkish locale
try:
    locale.setlocale(locale.LC_CTYPE, "C")
except:
    # Best effort only; a failing locale call must not stop startup.
    pass

## Apply translation:

# let gettext install _ as a built-in for all modules to see
try:
    gettext.install('sonata',
                    os.path.join(
                        sonata.__file__.split('/lib')[0], 'share', 'locale'),
                    unicode=1)
except:
    # Fall back to the system-wide locale directory.
    gettext.install('sonata', '/usr/share/locale', unicode=1)
gettext.textdomain('sonata')
    def _draw_copyright_notice(self, ctx, w_dots, h_dots, notice=None,
                               osm_date=None):
        """
        Draw a copyright notice at current location and within the
        given w_dots*h_dots rectangle.

        Args:
           ctx (cairo.Context): The Cairo context to use to draw.
           w_dots,h_dots (number): Rectangle dimension (ciaro units).
           notice (str): Optional notice to replace the default.
           osm_date (datetime.date): Optional timestamp of the OSM data
               shown in the default notice.
        """

        today = datetime.date.today()
        if notice is None:
            notice = _(u'Copyright © %(year)d MapOSMatic/OCitySMap developers.')
            notice+= ' '
            notice+= _(u'Map data © %(year)d OpenStreetMap contributors (see http://osm.org/copyright)')
            notice+= '\n'

            # Collect style annotations from the base stylesheet and
            # every active overlay.
            annotations = []
            if self.rc.stylesheet.annotation != '':
                annotations.append(self.rc.stylesheet.annotation)
            for overlay in self._overlays:
                if overlay.annotation != '':
                    annotations.append(overlay.annotation)
            if len(annotations) > 0:
                notice+= _(u'Map styles:')
                notice+= ' ' + '; '.join(annotations) + '\n'

            # Deduplicated set of extra data sources.
            datasources = set()
            if self.rc.stylesheet.datasource != '':
                datasources.add(self.rc.stylesheet.datasource)
            for overlay in self._overlays:
                if overlay.datasource != '':
                    datasources.add(overlay.datasource)
            if len(datasources) > 0:
                notice+= _(u'Additional data sources:')
                notice+= ' ' + '; '.join(list(datasources)) + '\n'

            notice+= _(u'Map rendered on: %(date)s. OSM data updated on: %(osmdate)s.')
            notice+= ' '
            notice+= _(u'The map may be incomplete or inaccurate.')

        # We need the correct locale to be set for strftime().
        # Save it as the string setlocale() itself reports so the
        # restore below round-trips (a locale.getlocale() tuple may
        # fail to restore with locale.Error).
        prev_locale = locale.setlocale(locale.LC_TIME)
        try:
            locale.setlocale(locale.LC_TIME, self.rc.i18n.language_code())
        except Exception:
            # BUG FIX: the message previously said LC_COLLATE although
            # LC_TIME is the category being set here.
            LOG.warning('error while setting LC_TIME to "%s"' % self.rc.i18n.language_code())

        try:
            if osm_date is None:
                osm_date_str = _(u'unknown')
            else:
                osm_date_str = osm_date.strftime("%d %B %Y %H:%M")

            notice = notice % {'year': today.year,
                               'date': today.strftime("%d %B %Y"),
                               'osmdate': osm_date_str}
        finally:
            locale.setlocale(locale.LC_TIME, prev_locale)

        # Render the final text with Pango, shrinking the font until it
        # fits inside the given rectangle.
        ctx.save()
        pc = PangoCairo.create_context(ctx)
        fd = Pango.FontDescription('DejaVu')
        fd.set_size(Pango.SCALE)
        layout = PangoCairo.create_layout(ctx)
        layout.set_font_description(fd)
        layout.set_text(notice, -1)
        draw_utils.adjust_font_size(layout, fd, w_dots, h_dots)
        PangoCairo.update_layout(ctx, layout)
        PangoCairo.show_layout(ctx, layout)
        ctx.restore()
def main():
    """
    TV for me

    Entry point (Python 2): resolves installation paths, chooses a
    filesystem encoding, parses the command-line options and primes the
    global `sickbeard` module state (console/daemon flags, forced port,
    config/data dirs, optional PID file) before startup.
    """

    # do some preliminary stuff
    sickbeard.MY_FULLNAME = os.path.normpath(os.path.abspath(__file__))
    sickbeard.MY_NAME = os.path.basename(sickbeard.MY_FULLNAME)
    sickbeard.PROG_DIR = os.path.dirname(sickbeard.MY_FULLNAME)
    sickbeard.DATA_DIR = sickbeard.PROG_DIR
    sickbeard.MY_ARGS = sys.argv[1:]
    sickbeard.CREATEPID = False
    sickbeard.DAEMON = False

    sickbeard.SYS_ENCODING = None

    # Pick up the user's locale; UTF-8 is forced regardless (see below),
    # but setlocale() itself can raise on misconfigured systems.
    try:
        locale.setlocale(locale.LC_ALL, "")
        #sickbeard.SYS_ENCODING = locale.getpreferredencoding()
        sickbeard.SYS_ENCODING = 'UTF-8'
    except (locale.Error, IOError):
        pass

    # for OSes that are poorly configured I'll just force UTF-8
    if not sickbeard.SYS_ENCODING or sickbeard.SYS_ENCODING in (
            'ANSI_X3.4-1968', 'US-ASCII', 'ASCII'):
        sickbeard.SYS_ENCODING = 'UTF-8'

    # need console logging for SickBeard.py and SickBeard-console.exe
    consoleLogging = (not hasattr(
        sys, "frozen")) or (sickbeard.MY_NAME.lower().find('-console') > 0)

    # rename the main thread
    threading.currentThread().name = "MAIN"

    # NOTE(review): the short-opt string "qfdp::" contains a double colon;
    # Python's getopt has no '::' (optional-argument) syntax, so the second
    # ':' looks accidental -- confirm.
    try:
        opts, args = getopt.getopt(sys.argv[1:], "qfdp::", [
            'quiet', 'forceupdate', 'daemon', 'port=', 'pidfile=', 'nolaunch',
            'config=', 'datadir='
        ])  #@UnusedVariable
    except getopt.GetoptError:
        print "Available options: --quiet, --forceupdate, --port, --daemon, --pidfile, --config, --datadir"
        sys.exit()

    forceUpdate = False
    forcedPort = None
    noLaunch = False

    for o, a in opts:
        # for now we'll just silence the logging
        if o in ('-q', '--quiet'):
            consoleLogging = False

        # should we update right away?
        if o in ('-f', '--forceupdate'):
            forceUpdate = True

        # suppress auto-launching the browser after startup
        if o in ('--nolaunch', ):
            noLaunch = True

        # use a different port
        if o in ('-p', '--port'):
            forcedPort = int(a)

        # Run as a daemon
        if o in ('-d', '--daemon'):
            if sys.platform == 'win32':
                print "Daemonize not supported under Windows, starting normally"
            else:
                consoleLogging = False
                sickbeard.DAEMON = True

        # config file
        if o in ('--config', ):
            sickbeard.CONFIG_FILE = os.path.abspath(a)

        # datadir
        if o in ('--datadir', ):
            sickbeard.DATA_DIR = os.path.abspath(a)

        # write a pidfile if requested
        if o in ('--pidfile', ):
            sickbeard.PIDFILE = str(a)

            # if the pidfile already exists, sickbeard may still be running, so exit
            if os.path.exists(sickbeard.PIDFILE):
                sys.exit("PID file " + sickbeard.PIDFILE +
                         " already exists. Exiting.")

            # a pidfile is only useful in daemon mode
            # also, test to make sure we can write the file properly
            # (the real PID is written later by the daemonizing code;
            # the file handle from file() is left to the GC to close)
            if sickbeard.DAEMON:
                sickbeard.CREATEPID = True
                try:
                    file(sickbeard.PIDFILE, 'w').write("pid\n")
                except IOError, e:
                    raise SystemExit("Unable to write PID file: %s [%d]" %
                                     (e.strerror, e.errno))
            else:
                logger.log(
                    u"Not running in daemon mode. PID file creation disabled.")
Example #37
0
import locale
# Force C numeric conventions ('.' as the decimal point) regardless of the
# user's environment so number parsing/formatting is reproducible.
locale.setlocale(locale.LC_NUMERIC, 'C')
#from . import normative, metadata, keymap, fstnrm, datadef, variance_misperception, empirical
#from . import early_stopping, ideal_obs
#from . import non_linear_contrast

colors = '''
*** Primary color:

   shade 0 = #D5102F = rgb(213, 16, 47) = rgba(213, 16, 47,1) = rgb0(0.835,0.063,0.184)
   shade 1 = #F3657B = rgb(243,101,123) = rgba(243,101,123,1) = rgb0(0.953,0.396,0.482)
   shade 2 = #E53B56 = rgb(229, 59, 86) = rgba(229, 59, 86,1) = rgb0(0.898,0.231,0.337)
   shade 3 = #AA051F = rgb(170,  5, 31) = rgba(170,  5, 31,1) = rgb0(0.667,0.02,0.122)
   shade 4 = #840015 = rgb(132,  0, 21) = rgba(132,  0, 21,1) = rgb0(0.518,0,0.082)

*** Secondary color (1):

   shade 0 = #E17810 = rgb(225,120, 16) = rgba(225,120, 16,1) = rgb0(0.882,0.471,0.063)
   shade 1 = #FFB46A = rgb(255,180,106) = rgba(255,180,106,1) = rgb0(1,0.706,0.416)
   shade 2 = #F3983F = rgb(243,152, 63) = rgba(243,152, 63,1) = rgb0(0.953,0.596,0.247)
   shade 3 = #B35B05 = rgb(179, 91,  5) = rgba(179, 91,  5,1) = rgb0(0.702,0.357,0.02)
   shade 4 = #8C4500 = rgb(140, 69,  0) = rgba(140, 69,  0,1) = rgb0(0.549,0.271,0)

*** Secondary color (2):

   shade 0 = #0E748B = rgb( 14,116,139) = rgba( 14,116,139,1) = rgb0(0.055,0.455,0.545)
   shade 1 = #4997A9 = rgb( 73,151,169) = rgba( 73,151,169,1) = rgb0(0.286,0.592,0.663)
   shade 2 = #2A8296 = rgb( 42,130,150) = rgba( 42,130,150,1) = rgb0(0.165,0.51,0.588)
   shade 3 = #065B6F = rgb(  6, 91,111) = rgba(  6, 91,111,1) = rgb0(0.024,0.357,0.435)
   shade 4 = #024656 = rgb(  2, 70, 86) = rgba(  2, 70, 86,1) = rgb0(0.008,0.275,0.337)
Example #38
0
    def _win_init_environment(self):
        """
        The Windows implementation of Python ignores environment
        variables when setting the locale; it only pays attention to
        the control panel language settings -- which for practical
        purposes limits one to the language for which one purchased
        Windows. This function enables using alternative
        localizations.

        Side effects: sets self.lang, self.encoding, self.language,
        self.collation, self.calendar, self.numeric and self.currency,
        and calls locale.setlocale() for LC_ALL / LC_COLLATE.
        """

        # %LANG% is the primary override; fall back to the process locale.
        if 'LANG' in os.environ:
            (lang, loc) = _check_mswin_locale(os.environ['LANG'])
            if loc:
                locale.setlocale(locale.LC_ALL, '.'.join(loc))
                self.lang = lang
                self.encoding = loc[1]
            else:
                LOG.debug("%%LANG%% value %s not usable", os.environ['LANG'])
        if not self.lang:
            locale.setlocale(locale.LC_ALL, '')
            (lang, encoding) = locale.getlocale()
            loc = _check_mswin_locale_reverse(lang)
            if loc[0]:
                self.lang = loc[0]
                self.encoding = loc[1]
            else:
                (lang, loc) = _check_mswin_locale(locale.getdefaultlocale()[0])
                if lang:
                    self.lang = lang
                    self.encoding = loc[1]
                else:
                    LOG.debug("No usable locale found in environment")

        # Last-resort defaults when nothing usable was found.
        if not self.lang:
            self.lang = 'C'
            self.encoding = 'cp1252'

        # Translation language: %LC_MESSAGES% first, then %LANGUAGE%.
        if 'LC_MESSAGES' in os.environ:
            lang = self.check_available_translations(os.environ['LC_MESSAGES'])
            if lang:
                self.language = [lang]
            else:
                LOG.debug("No translation for %%LC_MESSAGES%% locale")
        if 'LANGUAGE' in os.environ:
            language = [x for x in [self.check_available_translations(l)
                                    for l in os.environ["LANGUAGE"].split(":")]
                        if x]
            if language:
                self.language = language
            else:
                # BUGFIX: the message used to say %%LANGUAGES%%, but the
                # variable actually consulted is %LANGUAGE%.
                LOG.debug("No languages with translations found in %%LANGUAGE%%")
        if not self.language:
            self.language = [self.lang[:5]]

        # Collation: honor %COLLATION% when usable, otherwise derive it
        # from the interface language.
        if 'COLLATION' in os.environ:
            coll = os.environ['COLLATION']
            if HAVE_ICU:
                if coll[:2] in ICU_LOCALES:
                    self.collation = coll
                else:
                    self.collation = self.lang
            else:
                (coll, loc) = _check_mswin_locale(coll)
                if not loc:
                    (coll, loc) = _check_mswin_locale(self.lang)
                # BUGFIX: these two lines were nested under "if not loc",
                # so a *valid* %COLLATION% left self.collation unset and
                # LC_COLLATE untouched.  Apply the result in both cases.
                self.collation = '.'.join(loc)
                locale.setlocale(locale.LC_COLLATE, self.collation)
        else:
            if HAVE_ICU:
                self.collation = self.lang
            else:
                (coll, loc) = _check_mswin_locale(self.lang)
                if loc:
                    self.collation = '.'.join(loc)
                else:
                    self.collation = 'C'
                locale.setlocale(locale.LC_COLLATE, self.collation)

        # We can't import datahandler stuff or we'll get a circular
        # dependency, so we rely on the available translations list
        if 'LC_TIME' in os.environ:
            self.calendar = self.check_available_translations(os.environ['LC_TIME']) or self.lang
        else:
            self.calendar = self.lang

        if 'LC_NUMERIC' in os.environ:
            self.numeric = os.environ['LC_NUMERIC']
        else:
            self.numeric = self.lang

        if 'LC_MONETARY' in os.environ:
            self.currency = os.environ['LC_MONETARY']
        else:
            self.currency = self.lang
Example #39
0
def update_locale():
    """Apply the locale previously chosen via set_locale() to LC_ALL.

    Builds a name of the form "<language>_<locale>" from the per-thread
    settings and hands it to locale.setlocale().
    """
    locale_name = str(this_thread.language) + '_' + str(this_thread.locale)
    locale.setlocale(locale.LC_ALL, locale_name)
Example #40
0
    def _init_from_environment(self):
        """
        Derive language, encoding and per-category locale settings
        (collation, calendar, numeric, currency) from the POSIX locale
        environment, falling back to C/English when nothing usable is
        found.  Sets self.lang, self.encoding, self.language,
        self.calendar, self.collation, self.numeric and self.currency.
        """

        def _check_locale(locale):
            # Accept a (lang, encoding) pair if we have a translation for it.
            if not locale[0]:
                return False
            lang = self.check_available_translations(locale[0])
            # NOTE(review): getdefaultlocale() returns a *tuple*, so this
            # item assignment would raise TypeError if ever reached with a
            # translatable-but-missing English variant -- confirm.
            if not lang and locale[0].startswith('en'):
                locale[0] = lang = 'en_GB'
            if not lang:
                return False
            self.lang = locale[0]
            self.encoding = locale[1]
            self.language = [lang]
            return True

        _failure = False
        try:
            locale.setlocale(locale.LC_ALL, '')
            if not _check_locale(locale.getdefaultlocale(envvars=('LC_ALL', 'LANG', 'LANGUAGE'))):
                LOG.debug("Usable locale not found, localization settings ignored.");
                self.lang = 'C'
                self.encoding = 'ascii'
                self.language = ['en']
                _failure = True

        except locale.Error as err:
            LOG.debug("Locale error %s, localization settings ignored.",
                        err);
            self.lang = 'C'
            self.encoding = 'ascii'
            self.language = ['en']
            _failure = True

        #LC_MESSAGES
        (loc, enc) = locale.getlocale(locale.LC_MESSAGES)
        if loc:
            language = self.check_available_translations(loc)
            if language:
                self.language = [language]
            else:
                LOG.debug("No translation for LC_MESSAGES locale %s", loc)

        # $LANGUAGE overrides $LANG, $LC_MESSAGES
        if "LANGUAGE" in os.environ:
            language = [x for x in [self.check_available_translations(l)
                                    for l in os.environ["LANGUAGE"].split(":")]
                            if x]
            if language:
                self.language = language
                if not self.lang.startswith(self.language[0]):
                    LOG.debug("Overiding locale setting %s with LANGUAGE setting %s", self.lang, self.language[0])
            elif _failure:
                LOG.warning("No valid locale settings found, using US English")

        # NOTE(review): in the ICU branch self.collation is assigned here but
        # unconditionally overwritten by the LC_COLLATE block below -- this
        # looks like an indentation slip; confirm intended nesting.
        if HAVE_ICU:
            self.calendar = locale.getlocale(locale.LC_TIME)[0] or self.lang[:5]
            self.collation = locale.getlocale(locale.LC_COLLATE)[0] or self.lang[:5]
        else:
            loc = locale.getlocale(locale.LC_TIME)
            if loc and self.check_available_translations(loc[0]):
                self.calendar = '.'.join(loc)
            else:
                self.calendar = self.lang

        # Collation / numeric / monetary categories default to the main
        # language when the category-specific locale is unset.
        loc = locale.getlocale(locale.LC_COLLATE)
        if loc and loc[0]:
            self.collation = '.'.join(loc)
        else:
            self.collation = self.lang

        if HAVE_ICU and 'COLLATION' in os.environ:
            self.collation = os.environ['COLLATION']

        loc = locale.getlocale(locale.LC_NUMERIC)
        if loc and loc[0]:
            self.numeric = '.'.join(loc)
        else:
            self.numeric = self.lang

        loc = locale.getlocale(locale.LC_MONETARY)
        if loc and loc[0]:
            self.currency = '.'.join(loc)
        else:
            self.currency = self.lang
Example #41
0
################################################################################
################################################################################

import argparse
import os
import platform
import shutil
import sys
import tempfile
import string
import urllib
import urllib.request
import zipfile

import locale
locale.setlocale(locale.LC_ALL,
                 '')  # Use '' for auto, or force e.g. to 'en_US.UTF-8'

from collections import defaultdict
from sys import platform as _platform

from coreclr_arguments import *

################################################################################
# Azure Storage information
################################################################################

# Azure Blob Storage coordinates for the JIT rolling-build artifacts.
az_account_name = "clrjit2"
az_container_name = "jitrollingbuild"
az_builds_root_folder = "builds"

# Endpoint URIs derived from the names above.
az_blob_storage_account_uri = "https://{}.blob.core.windows.net/".format(az_account_name)
az_blob_storage_container_uri = "{}{}".format(az_blob_storage_account_uri, az_container_name)
Example #42
0
"""
Patches .osc files with .diff.xml files resulting from an upload of
a previous chunk of a multipart upload.
"""

__version__ = "$Revision: 21 $"

import os
import subprocess
import sys
import traceback
import codecs
import locale

import locale, codecs
locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
encoding = locale.getlocale()[1]
sys.stdout = codecs.getwriter(encoding)(sys.stdout, errors="replace")
sys.stderr = codecs.getwriter(encoding)(sys.stderr, errors="replace")

if len(sys.argv) < 2 or sys.argv[1] == "--help":
    print >> sys.stderr, u"Synopsis:"
    print >> sys.stderr, u"    %s <file.diff.xml> [osm-files-to-patch...]"
    sys.exit(1)

dd = {}

diff = open(sys.argv[1], "r")
sys.stdout.write("Parsing diff\n")
for line in diff:
    oldpos = line.find("old_id=\"")
Example #43
0
                  help="Set startup file (default is temporary directory)")
parser.add_option('-L', '--locale', action='store', type='str', default='',
                  dest='locale',
                  help="set locale to this string")
parser.add_option('-D', '--debug', action='store_true',
                  default=False,
                  help="start interactive debugger on unexpected error")
parser.add_option('--no-unbuffered', action='store_false', dest='unbuffered',
                  default=True,
                  help="Don't make output stream unbuffered")

# Unpack the parsed command-line options into module-level settings used
# by the test driver below.
(options, test_modules) = parser.parse_args()
# remove empty strings
test_modules = filter(len, test_modules)
verbose = options.verbose
# An empty string makes setlocale() adopt the user's environment default.
locale.setlocale(locale.LC_ALL, options.locale or '')
test_examples = options.examples
test_images = options.images
installbundles = options.installbundles
dotVistrails = options.dotVistrails
debug_mode = options.debug
vistrails_verbose = options.debugLevel

# Makes stdout unbuffered, so python -u is not needed
class Unbuffered(object):
   def __init__(self, stream):
       self.stream = stream
   def write(self, data):
       self.stream.write(data)
       self.stream.flush()
   def __getattr__(self, attr):
from __future__ import print_function
from src.config import config
import sys
from pyspark.sql import functions as F
from pyspark.sql.functions import col, round
from pyspark.sql.window import Window
from pyspark.sql.functions import lag
from sparkutils import sparkstuff as s
from othermisc import usedFunctions as uf
import locale
locale.setlocale(locale.LC_ALL, 'en_GB')


def main():
    print (f"""Getting average yearly prices per region for all""")
    appName = config['common']['appName']
    spark = s.spark_session(appName)
    sc = s.sparkcontext()
    spark = s.setSparkConfBQ(spark)
    lst = (spark.sql("SELECT FROM_unixtime(unix_timestamp(), 'dd/MM/yyyy HH:mm:ss.ss') ")).collect()
    print("\nStarted at");uf.println(lst)
    wSpecY = Window().partitionBy(F.date_format('Date',"yyyy"), 'regionname')
    house_df = s.loadTableFromBQ(spark,config['GCPVariables']['sourceDataset'],config['GCPVariables']['sourceTable'])
    house_df.printSchema()
    house_df.show(2, False)

    print(f"""\nAnnual House prices per regions in GBP""")
    # Workout yearly aversge prices
    df2 = house_df. \
                    select( \
                          F.date_format('Date', 'yyyy').cast("Integer").alias('year') \
def calculator():
    """Build and populate a Tkinter income-tax calculator window.

    Left frame: tax-year selector and gross-income entry.  Right frame: a
    grid of yearly/monthly/weekly/daily figures, filled in by callback()
    when the "Calculate My Wage" button is pressed.

    NOTE(review): no root.mainloop() call is visible in this function --
    presumably the caller runs the event loop; confirm.
    """
    # Use the user's default locale so '{:,}' grouping follows it.
    locale.setlocale(locale.LC_ALL, '')
    root = Tk.Tk()
    root.title("Income Tax Calculator")
    root.geometry("900x300")
    # Tax-free allowance (basic exemption) in rupees.
    TaxFreeNum = 250000

    def callback():
        # Read the gross annual salary and derive per-period figures.
        GetGrossTax = GrossTaxIn.get()
        GrossYear = float(GetGrossTax)
        GrossMonth = GrossYear / 12
        GrossWeek = GrossYear / 52
        GrossDay = GrossYear / 365
        Yearli = Tk.Label(RightFrame, text='₹{:,.2f}'.format(GrossYear))
        Yearli.grid(row=1, column=1)
        Monthli = Tk.Label(RightFrame, text='₹{:,.2f}'.format(GrossMonth))
        Monthli.grid(row=1, column=2)
        Weekli = Tk.Label(RightFrame, text='₹{:,.2f}'.format(GrossWeek))
        Weekli.grid(row=1, column=3)
        Dayli = Tk.Label(RightFrame, text='₹{:,.2f}'.format(GrossDay))
        Dayli.grid(row=1, column=4)

        # Row 2: the (constant) tax-free allowance per period.
        TaxFreeYear = Tk.Label(RightFrame, text='₹{:,.2f}'.format(TaxFreeNum))
        TaxFreeYear.grid(row=2, column=1)
        TaxFreeMonth = Tk.Label(RightFrame, text='₹{:,.2f}'.format(TaxFreeNum / 12))
        TaxFreeMonth.grid(row=2, column=2)
        TaxFreeWeek = Tk.Label(RightFrame, text='₹{:,.2f}'.format(TaxFreeNum / 52))
        TaxFreeWeek.grid(row=2, column=3)
        TaxFreeDay = Tk.Label(RightFrame, text='₹{:,.2f}'.format(TaxFreeNum / 365))
        TaxFreeDay.grid(row=2, column=4)

        # Row 3: taxable amount (gross minus allowance), zero below the limit.
        if GrossYear > 250000:
            TotalTaxableYear = Tk.Label(RightFrame, text='₹{:,.2f}'.format(GrossYear - TaxFreeNum))
            TotalTaxableYear.grid(row=3, column=1)
            TotalTaxableMonth = Tk.Label(RightFrame, text='₹{:,.2f}'.format(GrossMonth - TaxFreeNum / 12))
            TotalTaxableMonth.grid(row=3, column=2)
            TotalTaxableWeekly = Tk.Label(RightFrame, text='₹{:,.2f}'.format(GrossWeek - TaxFreeNum / 52))
            TotalTaxableWeekly.grid(row=3, column=3)
            TotalTaxableDaily = Tk.Label(RightFrame, text='₹{:,.2f}'.format(GrossDay - TaxFreeNum / 365))
            TotalTaxableDaily.grid(row=3, column=4)
        else:
            TotalTaxableYear = Tk.Label(RightFrame, text='₹0.00')
            TotalTaxableYear.grid(row=3, column=1)
            TotalTaxableMonth = Tk.Label(RightFrame, text='₹0.00')
            TotalTaxableMonth.grid(row=3, column=2)
            TotalTaxableWeekly = Tk.Label(RightFrame, text='₹0.00')
            TotalTaxableWeekly.grid(row=3, column=3)
            TotalTaxableDaily = Tk.Label(RightFrame, text='₹0.00')
            TotalTaxableDaily.grid(row=3, column=4)

        # Rows 4-6: per-slab tax. NOTE(review): the labels below display
        # 750000*0.10 and 500000*0.25, but TotalIn charges 150000*0.10 +
        # 200000*0.25 -- the displayed breakdown and the charged total
        # disagree; confirm the intended slab amounts.
        if GrossYear > 1500000:
            AdditionalRate = GrossYear - 1500000
            AdditionalRateTax = Tk.Label(RightFrame, text='₹{:,.2f}'.format(AdditionalRate * 0.30))
            AdditionalRateTax.grid(row=6, column=1)
            HigherRate = Tk.Label(RightFrame, text='₹{:,.2f}'.format(500000 * 0.25))
            HigherRate.grid(row=5, column=1)
            BasicRate = Tk.Label(RightFrame, text='₹{:,.2f}'.format(750000 * 0.10))
            BasicRate.grid(row=4, column=1)
            TotalIn = (150000 * 0.10) + (200000 * 0.25) + (AdditionalRate * 0.30)

        elif GrossYear <= 1500000 and GrossYear >= 1000000:
            HigherRate = GrossYear - 1000000
            HigherRateTax = Tk.Label(RightFrame, text='₹{:,.2f}'.format(HigherRate * 0.25))
            HigherRateTax.grid(row=5, column=1)
            BasicRate = Tk.Label(RightFrame, text='₹{:,.2f}'.format(150000 * 0.10))
            BasicRate.grid(row=4, column=1)
            AdditionalRateTax = Tk.Label(RightFrame, text='₹0.00')
            AdditionalRateTax.grid(row=6, column=1)
            # NOTE(review): 34449*0.10 is inconsistent with the 150000*0.10
            # label above -- confirm which figure is intended.
            TotalIn = (34449 * 0.10) + (HigherRate * 0.25)

        # NOTE(review): fractional incomes in (999999, 1000000) fall through
        # to the else branch (TotalIn = 0) -- boundary gap; confirm.
        elif GrossYear <= 999999 and GrossYear >= 250001:
            BasicRate = GrossYear - 250001
            BasicRateTax = Tk.Label(RightFrame, text='₹{:,.2f}'.format(BasicRate * 0.10))
            BasicRateTax.grid(row=4, column=1)
            HigherRateTax = Tk.Label(RightFrame, text='₹0.00')
            HigherRateTax.grid(row=5, column=1)
            AdditionalRateTax = Tk.Label(RightFrame, text='₹0.00')
            AdditionalRateTax.grid(row=6, column=1)
            TotalIn = (BasicRate * 0.10)

        else:
            TotalIn = 0

        # Rows 9-10: total deductions and resulting net wage per period.
        TotalNetTax = round(TotalIn)
        TotalNetTaxLi = Tk.Label(RightFrame, text='₹{:,.2f}'.format(TotalNetTax))
        TotalNetTaxLi.grid(row=9, column=1)

        TotalNetWages = GrossYear - TotalNetTax
        TotalNetWagesMonthly = TotalNetWages / 12
        TotalNetWagesWeekly = TotalNetWages / 52
        TotalNetWagesDaily = TotalNetWages / 365
        TotalNetWagesLi = Tk.Label(RightFrame, text='₹{:,.2f}'.format(TotalNetWages))
        TotalNetWagesLi.grid(row=10, column=1)
        TotalNetWagesMonthlyLi = Tk.Label(RightFrame, text='₹{:,.2f}'.format(TotalNetWagesMonthly))
        TotalNetWagesMonthlyLi.grid(row=10, column=2)
        TotalNetWagesWeeklyLi = Tk.Label(RightFrame, text='₹{:,.2f}'.format(TotalNetWagesWeekly))
        TotalNetWagesWeeklyLi.grid(row=10, column=3)
        TotalNetWagesDailyLi = Tk.Label(RightFrame, text='₹{:,.2f}'.format(TotalNetWagesDaily))
        TotalNetWagesDailyLi.grid(row=10, column=4)

    # Static layout: input controls on the left, results table on the right.
    LeftFrame = Tk.Frame(root, width=300, height=200, pady=3)
    RightFrame = Tk.Frame(root, width=400, height=200, pady=3)

    LeftFrame.grid(sticky="n", row=0, column=0)
    RightFrame.grid(sticky="n", row=0, column=1)

    TaxYearOp = Tk.StringVar()
    TaxYearOp.set("2020/2021")

    TaxYear = Tk.Label(LeftFrame, text="Select tax year")
    TaxYear.grid(row=1, column=0)

    Placeholder = Tk.Label(LeftFrame, text="")
    Placeholder.grid(row=1, column=1)

    TaxYearLi = Tk.OptionMenu(Placeholder, TaxYearOp, "2020/2021")
    TaxYearLi.grid(row=1, column=1)
    Placeholder2 = Tk.Label(LeftFrame, text="")
    Placeholder2.grid(row=2, column=1)
    # NOTE(review): the pension entry is created but never read by
    # callback() -- presumably unfinished; confirm.
    Pension = Tk.Label(LeftFrame, text="Pension contributions (₹)")
    Pension.grid(row=3, column=0)
    PensionEn = Tk.Entry(LeftFrame)
    PensionEn.grid(row=3, column=1)
    GrossTaxLa = Tk.Label(LeftFrame, text="annual earnings here! >")
    GrossTaxLa.grid(row=4, column=0)
    GrossTaxIn = Tk.Entry(LeftFrame)
    GrossTaxIn.grid(row=4, column=1)
    TaxCalGo = Tk.Button(LeftFrame, text="Calculate My Wage", command=callback)
    TaxCalGo.grid(row=5, column=1)

    # Results-table header and row labels.
    Summary = Tk.Label(RightFrame, text="Salary Summary", width=15)
    Summary.grid(row=0, column=0)
    Yearly = Tk.Label(RightFrame, text="Year", width=10)
    Yearly.grid(row=0, column=1)
    Monthly = Tk.Label(RightFrame, text="Monthly", width=10)
    Monthly.grid(row=0, column=2)
    Weekly = Tk.Label(RightFrame, text="Weekly", width=10)
    Weekly.grid(row=0, column=3)
    Daily = Tk.Label(RightFrame, text="Daily", width=10)
    Daily.grid(row=0, column=4)

    Summary = Tk.Label(RightFrame, text="Salary Summary", width=15)
    Summary.grid(row=0, column=0)
    GrossPay = Tk.Label(RightFrame, text="Gross Pay", width=15)
    GrossPay.grid(row=1, column=0)
    TaxFree = Tk.Label(RightFrame, text="Tax Free Allowance", width=15)
    TaxFree.grid(row=2, column=0)
    TotalTaxable = Tk.Label(RightFrame, text="Total Taxable", width=15)
    TotalTaxable.grid(row=3, column=0)
    Tax20 = Tk.Label(RightFrame, text="10% rate", width=15)
    Tax20.grid(row=4, column=0)
    Tax40 = Tk.Label(RightFrame, text="25% rate", width=15)
    Tax40.grid(row=5, column=0)
    Tax45 = Tk.Label(RightFrame, text="30% rate", width=15)
    Tax45.grid(row=6, column=0)
    TotalDeductions = Tk.Label(RightFrame, text="Total Deductions", width=15)
    TotalDeductions.grid(row=9, column=0)
    NetWage = Tk.Label(RightFrame, text="Net Wage", width=15)
    NetWage.grid(row=10, column=0)
Example #46
0
#!/usr/bin/env python2
import os
import locale
import re

# Czech locale (e.g. for Czech collation); must be installed on the system.
locale.setlocale(locale.LC_ALL, "cs_CZ.UTF-8")

LYRICS_DIR = './pisne/'  # song sources ("pisne" = "songs"); not referenced in this chunk
DATA_DIR = './data/'     # LaTeX templates: template.tex and song.tex


class Output:
    out = ''
    songs = []

    def __init__(self, songs):
        """Render *songs* into the document template; result kept in self.out.

        Fix: the template file handle was previously opened without ever
        being closed; a context manager now releases it deterministically.
        """
        self.songs = songs

        # Read the outer LaTeX template, then splice the rendered songs in.
        with open(DATA_DIR + 'template.tex') as template_file:
            out = template_file.read()
        out = out.replace("%CONTENT%", self.getContent())

        self.out = out

    def getContent(self):
        ret = ''
        template = open(DATA_DIR + '/song.tex').read()

        for song in self.songs:
            cret = template.replace("%NAME%", song.name)
            cret = cret.replace("%AUTHOR%", song.author)
            cret = cret.replace("%LYRICS%", song.lyrics)
def set_LC_TIME(value):
    """Switch the LC_TIME category (date/time formatting) to *value*."""
    locale.setlocale(locale.LC_TIME, value)
Example #48
0
    def __init__(self, content, metadata=None, settings=None,
                 source_path=None, context=None):
        """Initialize a content object from raw *content* plus *metadata*.

        Metadata keys become lowercase instance attributes ('save_as'/'url'
        are prefixed with 'override_'), then author(s), language, slug,
        localized dates and status are derived from settings and metadata.
        """
        if metadata is None:
            metadata = {}
        if settings is None:
            settings = copy.deepcopy(DEFAULT_CONFIG)

        self.settings = settings
        self._content = content
        if context is None:
            context = {}
        self._context = context
        self.translations = []

        # Site-wide defaults first, overridden by the per-document metadata.
        local_metadata = dict(settings['DEFAULT_METADATA'])
        local_metadata.update(metadata)

        # set metadata as attributes
        for key, value in local_metadata.items():
            if key in ('save_as', 'url'):
                key = 'override_' + key
            setattr(self, key.lower(), value)

        # also keep track of the metadata attributes available
        self.metadata = local_metadata

        #default template if it's not defined in page
        self.template = self._get_template()

        # First, read the authors from "authors", if not, fallback to "author"
        # and if not use the settings defined one, if any.
        if not hasattr(self, 'author'):
            if hasattr(self, 'authors'):
                self.author = self.authors[0]
            elif 'AUTHOR' in settings:
                self.author = Author(settings['AUTHOR'], settings)

        if not hasattr(self, 'authors') and hasattr(self, 'author'):
            self.authors = [self.author]

        # XXX Split all the following code into pieces, there is too much here.

        # manage languages
        self.in_default_lang = True
        if 'DEFAULT_LANG' in settings:
            default_lang = settings['DEFAULT_LANG'].lower()
            if not hasattr(self, 'lang'):
                self.lang = default_lang

            self.in_default_lang = (self.lang == default_lang)

        # create the slug if not existing, from the title
        if not hasattr(self, 'slug') and hasattr(self, 'title'):
            self.slug = slugify(self.title,
                                settings.get('SLUG_SUBSTITUTIONS', ()))

        self.source_path = source_path

        # manage the date format
        if not hasattr(self, 'date_format'):
            if hasattr(self, 'lang') and self.lang in settings['DATE_FORMATS']:
                self.date_format = settings['DATE_FORMATS'][self.lang]
            else:
                self.date_format = settings['DEFAULT_DATE_FORMAT']

        # A (locale, format) tuple means "format dates under this locale".
        # NOTE(review): setlocale() changes the locale process-wide and it is
        # never restored here -- confirm this is intended.
        if isinstance(self.date_format, tuple):
            locale_string = self.date_format[0]
            # Python 2 requires a byte-string locale name.
            if sys.version_info < (3, ) and isinstance(locale_string,
                                                       six.text_type):
                locale_string = locale_string.encode('ascii')
            locale.setlocale(locale.LC_ALL, locale_string)
            self.date_format = self.date_format[1]

        if hasattr(self, 'date'):
            self.locale_date = strftime(self.date, self.date_format)
        if hasattr(self, 'modified'):
            self.locale_modified = strftime(self.modified, self.date_format)

        # manage status
        if not hasattr(self, 'status'):
            self.status = settings['DEFAULT_STATUS']
            if not settings['WITH_FUTURE_DATES']:
                if hasattr(self, 'date') and self.date > datetime.now():
                    self.status = 'draft'

        # store the summary metadata if it is set
        if 'summary' in metadata:
            self._summary = metadata['summary']

        # Let plugins react to the fully-initialized object.
        signals.content_object_init.send(self)
Example #49
0
def reportQualitativa( qualitativa , alumnes = [], grups = [], request = None):
    """Build per-student qualitative-evaluation reports and render them to ODT.

    (Python 2 / Django.)  Students are selected when they have responses and
    are either listed in *alumnes* or belong to one of *grups*; both default
    lists are only read, never mutated, so the mutable defaults are benign.
    NOTE(review): the visible code ends inside the rendering try/except with
    `contingut`/`excepcio` unused -- the response-building tail is presumably
    elsewhere; confirm.
    """

    import locale
    # Catalan month names for the strftime() calls below.
    locale.setlocale(locale.LC_TIME, 'ca_ES.utf8')

    reports = []

    for nivell in Nivell.objects.all():
        for curs in nivell.curs_set.all():
            for grup in curs.grup_set.all():
                # Filter: has responses AND (explicitly chosen OR in a chosen group).
                q_teRespostes = Q(respostaavaluacioqualitativa__isnull = False)
                q_alumneTriat =  Q(pk__in = [a.pk for a in alumnes ])
                q_grupTriat = Q( grup__in = grups )
                q_filtre_alumn = q_teRespostes & ( q_alumneTriat | q_grupTriat )

                for alumne in grup.alumne_set.filter( q_filtre_alumn ).distinct():
                    report = tools.classebuida()

                    report.alumne = alumne
                    report.respostes = []
                    # Closing date of the evaluation, formatted in Catalan.
                    report.data = qualitativa.data_tancar_avaluacio.strftime( '%d de %B de %Y' )

                    report.tutors = u', '.join( [u'Sr(a) ' + unicode(t) for t in alumne.tutorsDeLAlumne() ] )

                    # One entry per subject the student has responses for.
                    for assignatura in Assignatura.objects.filter(
                                        respostaavaluacioqualitativa__qualitativa = qualitativa,
                                        respostaavaluacioqualitativa__alumne = alumne
                                        ).distinct():
                        resposta = tools.classebuida()
                        resposta.assignatura = assignatura.getLongName()
                        resposta.frases = []
                        for respostaQualitativa in RespostaAvaluacioQualitativa.objects.filter(
                                         alumne = alumne,
                                         assignatura = assignatura ):

                            # Free-text answer wins over the predefined item.
                            if respostaQualitativa.frase_oberta:
                                resposta.frases.append(respostaQualitativa.frase_oberta )
                            else:
                                resposta.frases.append(respostaQualitativa.item )

                        report.respostes.append( resposta )

                        #endfor resposta

                    if report: reports.append( report )

                #endfor alumne


    #from django.template import Context
    from appy.pod.renderer import Renderer
    import cgi
    import os
    from django import http
    import time

    excepcio = None
    contingut = None
    try:

        #resultat = StringIO.StringIO( )
        # Unique temp file name per request to avoid collisions.
        resultat = "/tmp/DjangoAula-temp-{0}-{1}.odt".format( time.time(), request.session.session_key )
        #context = Context( {'reports' : reports, } )
        path = None
        # Prefer a site-customised template, else fall back to the bundled one.
        try:
            path = os.path.join( settings.PROJECT_DIR,  '../customising/docs/qualitativa.odt' )
        except:
            path = os.path.join(os.path.dirname(__file__), 'templates/qualitativa.odt')
        renderer = Renderer(path, {'reports' : reports, }, resultat)
        renderer.run()
        docFile = open(resultat, 'rb')
        contingut = docFile.read()
        docFile.close()
        os.remove(resultat)

    except Exception, e:
        excepcio = unicode( e )
Example #50
0
import os.path as path
from seqprep import countUndetInd
from seqhub import hUtil
from seqhub.hUtil import flatten, mkdir_p
import os, re, glob, traceback
from lxml import etree, html
from abc import ABCMeta, abstractmethod

try:
    from collections import OrderedDict
except ImportError:
    from ordereddict import OrderedDict # for python 2.6 and earlier, use backport 

import locale  #for printing of commas in numbers using format()
ignored = locale.setlocale(locale.LC_ALL, '') # empty string for platform's default setting

class IlluminaNextGenAnalysis:
    """Abstract factory for run-type-specific Illumina analysis objects."""

    __metaclass__ = ABCMeta


    @classmethod
    def getInstance(cls, analysisName, run):
        """Return the analysis implementation matching ``run.runType``.

        Raises Exception for run types other than 'HiSeq' / 'NextSeq'.
        """
        run_type = run.runType
        if run_type == 'NextSeq':
            return NextSeqAnalysis(analysisName, run)
        if run_type == 'HiSeq':
            return HiSeqAnalysis(analysisName, run)
        raise Exception('Unrecognized run type: %s' % run_type)
Example #51
0
# Convert JSON to MARC
# Usage: python json2marc.py aut/person_autoriteter.json build/hansteen.json > build/hansteen.marc21.xml
#
import sys
import os
import re
import csv
import json
import xmlwitch
from copy import copy
from datetime import datetime
import locale
import argparse

locale.setlocale(locale.LC_ALL, 'no_NO.UTF-8')

# Current date and time with 1 millisecond digit, as required by the MARC21 005 field
# (14 digits YYYYMMDDHHMMSS, a dot, then one fractional digit = 16 chars).
current_datetime = datetime.utcnow().strftime('%Y%m%d%H%M%S.%f')[:16]


def format_date(dato):

    # Fullstendig dato
    m = re.match('[0-9]{4}-[0-9]{2}-[0-9]{2}', dato)
    if m is not None:
        return datetime.strptime(dato,
                                 '%Y-%m-%d').strftime('%d. %B %Y').lstrip('0')

    # Mangler årstall
    m = re.match('uuuu-[0-9]{2}-[0-9]{2}', dato)
Example #52
0
def config():
    """Apply the configured locale when view counts are shown in subtitles.

    Reads the module-level SUBTITLE_SHOWS_VIEW_COUNT flag and LOCALE name;
    presumably LOCALE controls grouping/formatting of the displayed counts
    (TODO confirm against the call sites).
    """
    # Idiomatic truthiness test instead of explicit comparison with True.
    if SUBTITLE_SHOWS_VIEW_COUNT:
        locale.setlocale(locale.LC_ALL, LOCALE)
Example #53
0
    def setup(self):
        """Initialize all application services and configuration, in order.

        Stages: providers, core configuration, zookeeper (or an ini-file
        fallback), memcache pools, cassandra, postgres, and the layered cache
        chains; each stage is timed via ``self.startup_timer``.

        Raises ValueError when required .ini settings (amqp_host,
        cassandra_seeds) are missing.
        """
        self.queues = queues.declare_queues(self)

        ################# PROVIDERS
        self.media_provider = select_provider(
            self.config,
            self.pkg_resources_working_set,
            "r2.provider.media",
            self.media_provider,
        )
        self.startup_timer.intermediate("providers")

        ################# CONFIGURATION
        # AMQP is required
        if not self.amqp_host:
            raise ValueError("amqp_host not set in the .ini")

        if not self.cassandra_seeds:
            raise ValueError("cassandra_seeds not set in the .ini")

        # heavy load mode is read only mode with a different infobar
        if self.heavy_load_mode:
            self.read_only_mode = True

        origin_prefix = self.domain_prefix + "." if self.domain_prefix else ""
        self.origin = "http://" + origin_prefix + self.domain

        self.trusted_domains = set([self.domain])
        if self.https_endpoint:
            https_url = urlparse(self.https_endpoint)
            self.trusted_domains.add(https_url.hostname)

        # load the unique hashed names of files under static
        static_files = os.path.join(self.paths.get('static_files'), 'static')
        names_file_path = os.path.join(static_files, 'names.json')
        if os.path.exists(names_file_path):
            with open(names_file_path) as handle:
                self.static_names = json.load(handle)
        else:
            self.static_names = {}

        # make python warnings go through the logging system
        logging.captureWarnings(capture=True)

        log = logging.getLogger('reddit')

        # when we're a script (paster run) just set up super simple logging
        if self.running_as_script:
            log.setLevel(logging.INFO)
            log.addHandler(logging.StreamHandler())

        # if in debug mode, override the logging level to DEBUG
        if self.debug:
            log.setLevel(logging.DEBUG)

        # attempt to figure out which pool we're in and add that to the
        # LogRecords.
        try:
            with open("/etc/ec2_asg", "r") as f:
                pool = f.read().strip()
            # clean up the pool name since we're putting stuff after "-"
            pool = pool.partition("-")[0]
        except IOError:
            pool = "reddit-app"
        self.log = logging.LoggerAdapter(log, {"pool": pool})

        # set locations
        self.locations = {}

        if not self.media_domain:
            self.media_domain = self.domain
        if self.media_domain == self.domain:
            print >> sys.stderr, (
                "Warning: g.media_domain == g.domain. " +
                "This may give untrusted content access to user cookies")

        # allow command-line "key=value" arguments to override g.* attributes
        # (values are set as raw strings, with no type coercion)
        for arg in sys.argv:
            tokens = arg.split("=")
            if len(tokens) == 2:
                k, v = tokens
                self.log.debug("Overriding g.%s to %s" % (k, v))
                setattr(self, k, v)

        self.reddit_host = socket.gethostname()
        self.reddit_pid = os.getpid()

        if hasattr(signal, 'SIGUSR1'):
            # not all platforms have user signals
            signal.signal(signal.SIGUSR1, thread_dump)

        # set the process-wide locale from config (affects number/date
        # formatting done via the locale module)
        locale.setlocale(locale.LC_ALL, self.locale)

        # Pre-calculate ratelimit values
        self.RL_RESET_SECONDS = self.config["RL_RESET_MINUTES"] * 60
        self.RL_MAX_REQS = int(self.config["RL_AVG_REQ_PER_SEC"] *
                               self.RL_RESET_SECONDS)

        self.RL_OAUTH_RESET_SECONDS = self.config["RL_OAUTH_RESET_MINUTES"] * 60
        self.RL_OAUTH_MAX_REQS = int(self.config["RL_OAUTH_AVG_REQ_PER_SEC"] *
                                     self.RL_OAUTH_RESET_SECONDS)

        self.startup_timer.intermediate("configuration")

        ################# ZOOKEEPER
        # for now, zookeeper will be an optional part of the stack.
        # if it's not configured, we will grab the expected config from the
        # [live_config] section of the ini file
        zk_hosts = self.config.get("zookeeper_connection_string")
        if zk_hosts:
            from r2.lib.zookeeper import (connect_to_zookeeper, LiveConfig,
                                          LiveList)
            zk_username = self.config["zookeeper_username"]
            zk_password = self.config["zookeeper_password"]
            self.zookeeper = connect_to_zookeeper(zk_hosts,
                                                  (zk_username, zk_password))
            self.live_config = LiveConfig(self.zookeeper, LIVE_CONFIG_NODE)
            self.secrets = fetch_secrets(self.zookeeper)
            self.throttles = LiveList(self.zookeeper,
                                      "/throttles",
                                      map_fn=ipaddress.ip_network,
                                      reduce_fn=ipaddress.collapse_addresses)
        else:
            self.zookeeper = None
            parser = ConfigParser.RawConfigParser()
            parser.optionxform = str
            parser.read([self.config["__file__"]])
            self.live_config = extract_live_config(parser, self.plugins)
            self.secrets = extract_secrets(parser)
            self.throttles = tuple()  # immutable since it's not real

        self.startup_timer.intermediate("zookeeper")

        ################# MEMCACHE
        num_mc_clients = self.num_mc_clients

        # the main memcache pool. used for most everything.
        self.memcache = CMemcache(
            self.memcaches,
            min_compress_len=50 * 1024,
            num_clients=num_mc_clients,
        )

        # a pool just used for @memoize results
        memoizecaches = CMemcache(
            self.memoizecaches,
            min_compress_len=50 * 1024,
            num_clients=num_mc_clients,
        )

        # a pool just for srmember rels
        srmembercaches = CMemcache(
            self.srmembercaches,
            min_compress_len=96,
            num_clients=num_mc_clients,
        )

        ratelimitcaches = CMemcache(
            self.ratelimitcaches,
            min_compress_len=96,
            num_clients=num_mc_clients,
        )

        # a smaller pool of caches used only for distributed locks.
        # TODO: move this to ZooKeeper
        self.lock_cache = CMemcache(self.lockcaches,
                                    num_clients=num_mc_clients)
        self.make_lock = make_lock_factory(self.lock_cache, self.stats)

        # memcaches used in front of the permacache CF in cassandra.
        # XXX: this is a legacy thing; permacache was made when C* didn't have
        # a row cache.
        if self.permacache_memcaches:
            permacache_memcaches = CMemcache(self.permacache_memcaches,
                                             min_compress_len=50 * 1024,
                                             num_clients=num_mc_clients)
        else:
            permacache_memcaches = None

        # the stalecache is a memcached local to the current app server used
        # for data that's frequently fetched but doesn't need to be fresh.
        if self.stalecaches:
            stalecaches = CMemcache(self.stalecaches,
                                    num_clients=num_mc_clients)
        else:
            stalecaches = None

        # rendercache holds rendered partial templates.
        rendercaches = CMemcache(
            self.rendercaches,
            noreply=True,
            no_block=True,
            num_clients=num_mc_clients,
            min_compress_len=480,
        )

        # pagecaches hold fully rendered pages
        pagecaches = CMemcache(
            self.pagecaches,
            noreply=True,
            no_block=True,
            num_clients=num_mc_clients,
            min_compress_len=1400,
        )

        self.startup_timer.intermediate("memcache")

        ################# CASSANDRA
        keyspace = "reddit"
        self.cassandra_pools = {
            "main":
            StatsCollectingConnectionPool(keyspace,
                                          stats=self.stats,
                                          logging_name="main",
                                          server_list=self.cassandra_seeds,
                                          pool_size=self.cassandra_pool_size,
                                          timeout=4,
                                          max_retries=3,
                                          prefill=False),
        }

        permacache_cf = CassandraCache(
            'permacache',
            self.cassandra_pools[self.cassandra_default_pool],
            read_consistency_level=self.cassandra_rcl,
            write_consistency_level=self.cassandra_wcl)

        self.startup_timer.intermediate("cassandra")

        ################# POSTGRES
        self.dbm = self.load_db_params()
        self.startup_timer.intermediate("postgres")

        ################# CHAINS
        # initialize caches. Any cache-chains built here must be added
        # to cache_chains (closed around by reset_caches) so that they
        # can properly reset their local components
        cache_chains = {}
        localcache_cls = (SelfEmptyingCache
                          if self.running_as_script else LocalCache)

        if stalecaches:
            self.cache = StaleCacheChain(
                localcache_cls(),
                stalecaches,
                self.memcache,
            )
        else:
            self.cache = MemcacheChain((localcache_cls(), self.memcache))
        cache_chains.update(cache=self.cache)

        if stalecaches:
            self.memoizecache = StaleCacheChain(
                localcache_cls(),
                stalecaches,
                memoizecaches,
            )
        else:
            self.memoizecache = MemcacheChain(
                (localcache_cls(), memoizecaches))
        cache_chains.update(memoizecache=self.memoizecache)

        if stalecaches:
            self.srmembercache = StaleCacheChain(
                localcache_cls(),
                stalecaches,
                srmembercaches,
            )
        else:
            self.srmembercache = MemcacheChain(
                (localcache_cls(), srmembercaches))
        cache_chains.update(srmembercache=self.srmembercache)

        self.ratelimitcache = MemcacheChain(
            (localcache_cls(), ratelimitcaches))
        cache_chains.update(ratelimitcache=self.ratelimitcache)

        self.rendercache = MemcacheChain((
            localcache_cls(),
            rendercaches,
        ))
        cache_chains.update(rendercache=self.rendercache)

        self.pagecache = MemcacheChain((
            localcache_cls(),
            pagecaches,
        ))
        cache_chains.update(pagecache=self.pagecache)

        # the thing_cache is used in tdb_cassandra.
        self.thing_cache = CacheChain((localcache_cls(), ))
        cache_chains.update(thing_cache=self.thing_cache)

        self.permacache = CassandraCacheChain(
            localcache_cls(),
            permacache_cf,
            memcache=permacache_memcaches,
            lock_factory=self.make_lock,
        )
        cache_chains.update(permacache=self.permacache)

        # hardcache is used for various things that tend to expire
        # TODO: replace hardcache w/ cassandra stuff
        self.hardcache = HardcacheChain(
            (localcache_cls(), self.memcache, HardCache(self)),
            cache_negative_results=True,
        )
        cache_chains.update(hardcache=self.hardcache)

        # I know this sucks, but we need non-request-threads to be
        # able to reset the caches, so we need them be able to close
        # around 'cache_chains' without being able to call getattr on
        # 'g'
        def reset_caches():
            for name, chain in cache_chains.iteritems():
                chain.reset()
                chain.stats = CacheStats(self.stats, name)

        self.cache_chains = cache_chains

        self.reset_caches = reset_caches
        self.reset_caches()

        self.startup_timer.intermediate("cache_chains")

        # try to set the source control revision numbers
        self.versions = {}
        r2_root = os.path.dirname(os.path.dirname(self.paths["root"]))
        r2_gitdir = os.path.join(r2_root, ".git")
        self.short_version = self.record_repo_version("r2", r2_gitdir)

        if I18N_PATH:
            i18n_git_path = os.path.join(os.path.dirname(I18N_PATH), ".git")
            self.record_repo_version("i18n", i18n_git_path)

        self.startup_timer.intermediate("revisions")
Example #54
0
def reportQualitativa2( qualitativa , alumnes = None, grups = None, request = None):
    """Generate the qualitative-evaluation report for the selected students.

    Builds one report (a list of table-like structures) per student who has
    qualitative answers and is either explicitly selected (``alumnes``) or
    belongs to a selected group (``grups``), then renders the result as a PDF.

    qualitativa -- the qualitative evaluation being reported on.
    alumnes -- optional explicit list of students to include.
    grups -- optional list of groups whose students are included.
    request -- current request; only used by the (currently dead) HTML branch.
    """
    # Avoid mutable default arguments; treat None as "no selection".
    # The lists are only read below, so this is behavior-preserving.
    if alumnes is None:
        alumnes = []
    if grups is None:
        grups = []

    import locale
    # Catalan time locale so strftime month names come out in Catalan;
    # raises locale.Error if 'ca_ES.utf8' is not installed — TODO confirm
    # availability on deployment hosts.
    locale.setlocale(locale.LC_TIME, 'ca_ES.utf8')
    
    reports = []
    
    for nivell in Nivell.objects.all():
        for curs in nivell.curs_set.all():
            for grup in curs.grup_set.all():
                q_teRespostes = Q(respostaavaluacioqualitativa__isnull = False) 
                q_alumneTriat =  Q(pk__in = [a.pk for a in alumnes ])
                q_grupTriat = Q( grup__in = grups )
                q_filtre_alumn = q_teRespostes & ( q_alumneTriat | q_grupTriat )
                               
                for alumne in grup.alumne_set.filter( q_filtre_alumn ).distinct(): 
                    report = []
                    taula = tools.classebuida()
                    taula.titol = tools.classebuida()
                    taula.titol.contingut = ""
                    taula.classe = 'titol'
                    taula.capceleres = []
                    taula.printIfEmpty = True
                    
                    capcelera = tools.classebuida()
                    capcelera.amplade = 900
                    capcelera.contingut = u'''Butlletí de qualificacions de l'avaluació qualitativa.'''.upper()
                    taula.capceleres.append(capcelera)
                    taula.fileres = []

                    filera = []
                    camp = tools.classebuida()
                    camp.contingut = u'Benvolguts tutors/res'
                    filera.append(camp)
                    taula.fileres.append(filera)
                    
                    filera = []
                    camp = tools.classebuida()
                    camp.contingut = u'''L’equip de professors de l’alumne/a {0} que cursa {1} us fem arribar un full de seguiment 
                    de començament de curs. En aquest butlletí us notifiquem algunes observacions que ens semblen significatives en 
                    aquest inici del curs. Esperem que us sigui d’utilitat per prendre les decisions correctes pel normal 
                    desenvolupament del curs.'''.format( alumne, alumne.grup )
                    filera.append(camp)
                    taula.fileres.append(filera)
                    
                    report.append(taula)
        
                    taula = tools.classebuida()
                    taula.titol = tools.classebuida()
                    taula.titol.contingut = ""
                    taula.capceleres = []
                    taula.classe = 'pijama'
                    
                    capcelera = tools.classebuida()
                    capcelera.amplade = 300
                    capcelera.contingut = u'''Matèria'''
                    taula.capceleres.append(capcelera)

                    capcelera = tools.classebuida()
                    capcelera.amplade = 700
                    capcelera.contingut = u'''Comentaris'''
                    taula.capceleres.append(capcelera)
                    
                    taula.fileres = []
                    
                    for assignatura in Assignatura.objects.filter( 
                                        respostaavaluacioqualitativa__qualitativa = qualitativa,
                                        respostaavaluacioqualitativa__alumne = alumne  
                                        ).distinct():
                        
                        esPrimeraResposta = True
                        # NOTE(review): unlike the assignatura query above, this
                        # filter does not constrain by qualitativa — confirm
                        # whether answers from other evaluations should appear.
                        for resposta in RespostaAvaluacioQualitativa.objects.filter(
                                         alumne = alumne,
                                         assignatura = assignatura ):
                            
                            filera = []
                            
                            camp = tools.classebuida()
                            camp.contingut = u'{0}'.format( assignatura.nom_assignatura if esPrimeraResposta else '' )
                            filera.append(camp)

                            camp = tools.classebuida()
                            camp.contingut = u'{0}'.format( resposta.item  )
                            filera.append(camp)
                                                                                 
                            taula.fileres.append( filera )
                            
                            esPrimeraResposta = False
                        #endfor resposta
                        
                    #endfor assignatura
                    report.append(taula) #------------------------------------------------------------------------------
                    
                    taula = tools.classebuida()
                    taula.titol = tools.classebuida()
                    taula.titol.contingut = ""
                    taula.classe = 'pijama'
                    taula.capceleres = []
                    
                    capcelera = tools.classebuida()
                    capcelera.amplade = 700
                    capcelera.contingut = u'''Comentari del tutor/a'''
                    taula.capceleres.append(capcelera)
                    taula.fileres = []

                    filera = []
                    camp = tools.classebuida()
                    camp.contingut = u''
                    filera.append(camp)
                    taula.fileres.append(filera)

                    filera = []
                    camp = tools.classebuida()
                    camp.contingut = u'_____________________________________________________________________________________'
                    filera.append(camp)
                    taula.fileres.append(filera)
                    taula.fileres.append(filera)
                    taula.fileres.append(filera)
                    report.append(taula) #------------------------------------------------------------------------------

                    taula = tools.classebuida()
                    taula.titol = tools.classebuida()
                    taula.titol.contingut = ""
                    taula.classe = 'pijama'
                    taula.capceleres = []
                    taula.fileres = []
                                        
                    filera = []
                    camp = tools.classebuida()
                    camp.contingut = u'Atentament,'
                    filera.append(camp)
                    taula.fileres.append(filera)

                    filera = []
                    camp = tools.classebuida()
                    camp.contingut = u'{0}'.format( u','.join( [u'Sr(a) ' + unicode(t.professor) for t in alumne.tutorsDeLAlumne() ] ) )  
                    filera.append(camp)
                    taula.fileres.append(filera)

                    filera = []
                    camp = tools.classebuida()
                    camp.contingut = u'Figueres, a {0}'.format( qualitativa.data_tancar_avaluacio.strftime( '%d de %B de %Y' ) )  
                    filera.append(camp)
                    taula.fileres.append(filera)

                    filera = []
                    camp = tools.classebuida()
                    camp.contingut = u'''................................................................................
                    .......................................................................................'''
                    filera.append(camp)
                    taula.fileres.append(filera)
                    report.append(taula) #------------------------------------------------------------------------------
                    
                    taula = tools.classebuida()
                    taula.titol = tools.classebuida()
                    taula.titol.contingut = ""
                    taula.classe = 'pijama'
                    taula.capceleres = []
                    taula.fileres = []
                                        
                    filera = []
                    camp = tools.classebuida()
                    camp.contingut = u'''El/la Sr/a ______________________________________ com a pare / mare / tutor/a de l'alumne
                    {0} de {1}, he rebut el butlletí de qualificacions de l'avaluació qualitativa.'''.format( alumne, alumne.grup )
                    filera.append(camp)
                    taula.fileres.append(filera)
                    
                    filera = []
                    camp = tools.classebuida()
                    camp.contingut = u'Signat:'
                    filera.append(camp)
                    taula.fileres.append(filera)

                    filera = []
                    camp = tools.classebuida()
                    camp.contingut = u'DNI núm:'
                    filera.append(camp)
                    taula.fileres.append(filera)

                    filera = []
                    camp = tools.classebuida()
                    camp.contingut = u''
                    filera.append(camp)
                    taula.fileres.append(filera)
                    taula.fileres.append(filera)

                    filera = []
                    camp = tools.classebuida()
                    camp.contingut = u'_________________, ____ de _______________ de {0}'.format( qualitativa.data_tancar_avaluacio.year )
                    filera.append(camp)
                    taula.fileres.append(filera)
                    report.append(taula) #------------------------------------------------------------------------------

                    
                    if report: reports.append( report )
                    
                #endfor alumne
                            
                       
                              
    formatPDF = True
    if formatPDF:
        return write_pdf('pdfReport.html',{
            'pagesize' : 'A4',
            'reports' : reports,
            })
    else:
        # NOTE(review): dead branch while formatPDF is hard-coded True; it
        # also passes 'report' (last student only) rather than 'reports' —
        # confirm before enabling.
        return render_to_response(
            'report.html',
                {'report': report,
                 'head': u'Avaluacions Qualitatives' ,
                },
                context_instance=RequestContext(request))
def main(root_path, force_launch=False):
    """Initialize the editor environment and launch the Natron animations tool.

    Sets up paths, the session folder, preferences, translations, GTK
    threading, MLT profiles/filters, then opens the main window and enters
    the GTK main loop (blocks until the window is closed).

    root_path -- application root used to resolve resource paths.
    force_launch -- accepted but not referenced in this function.
    """

    gtk_version = "%s.%s.%s" % (Gtk.get_major_version(), Gtk.get_minor_version(), Gtk.get_micro_version())
    editorstate.gtk_version = gtk_version
    try:
        editorstate.mlt_version = mlt.LIBMLT_VERSION
    except:
        editorstate.mlt_version = "0.0.99" # magic string for "not found"

    # random hex id; presumably used to name this process's session folder —
    # TODO confirm against get_session_folder()
    global _session_id
    _session_id = md5.new(os.urandom(16)).hexdigest()

    # Set paths.
    respaths.set_paths(root_path)

    # Init session folders
    # (any leftover folder from a previous run is discarded first)
    if os.path.exists(get_session_folder()):
        shutil.rmtree(get_session_folder())
        
    os.mkdir(get_session_folder())

    # Load editor prefs and list of recent projects
    editorpersistance.load()
    if editorpersistance.prefs.dark_theme == True:
        respaths.apply_dark_theme()

    # Init translations module with translations data
    translations.init_languages()
    translations.load_filters_translations()
    mlttransitions.init_module()

    # Load aniamtions data
    natronanimations.load_animations_projects_xml()

    # Init gtk threads
    Gdk.threads_init()
    Gdk.threads_enter()

    # Set monitor sizes
    """
    scr_w = Gdk.Screen.width()
    scr_h = Gdk.Screen.height()
    editorstate.SCREEN_WIDTH = scr_w
    editorstate.SCREEN_HEIGHT = scr_h
    if editorstate.screen_size_large_height() == True and editorstate.screen_size_small_width() == False:
        global MONITOR_WIDTH, MONITOR_HEIGHT
        MONITOR_WIDTH = 650
        MONITOR_HEIGHT = 400 # initial value, this gets changed when material is loaded
    """
    
    # Request dark them if so desired
    if editorpersistance.prefs.dark_theme == True:
        Gtk.Settings.get_default().set_property("gtk-application-prefer-dark-theme", True)

    # We need mlt fpr profiles handling
    repo = mlt.Factory().init()

    # Set numeric locale to use "." as radix, MLT initilizes this to OS locale and this causes bugs 
    locale.setlocale(locale.LC_NUMERIC, 'C')

    # Check for codecs and formats on the system
    mltenv.check_available_features(repo)
    renderconsumer.load_render_profiles()

    # Load filter and compositor descriptions from xml files.
    mltfilters.load_filters_xml(mltenv.services)
    mlttransitions.load_compositors_xml(mltenv.transitions)

    # Create list of available mlt profiles
    mltprofiles.load_profile_list()

    gui.load_current_colors()
    
    # Set launch profile
    profile_name = sys.argv[1].replace("_", " ") # we had underscores put in to pass as single arg
    print profile_name
    global _profile
    _profile = mltprofiles.get_profile(profile_name)
    
    global _animation_instance
    _animation_instance = natronanimations.get_default_animation_instance(_profile)
        
    global _window
    _window = NatronAnimatationsToolWindow()
    _window.pos_bar.set_dark_bg_color()

    # Enters the GTK main loop; returns only after Gtk.main_quit() is called.
    Gtk.main()
    Gdk.threads_leave()
Example #56
0
from time import strftime
from djfacet.constants import *
from django.http import QueryDict
import locale
locale.setlocale(locale.LC_ALL, '')  # UK

try:
    from settings import DEBUG
# Catch only the failure mode this fallback is for: no settings module /
# no DEBUG symbol.  A bare "except:" would also hide real errors raised
# while importing settings.
except ImportError:
    DEBUG = False

##################
#
#  FACETED MANAGER AND VIEWS UTILS
#
##################


def get_defaultResType():
    """Return the result type flagged as default, or the first one if none is flagged."""
    flagged = (rt for rt in DJF_SPECS.result_types if rt.get('isdefault'))
    return next(flagged, DJF_SPECS.result_types[0])


def validate_ResType(restype_candidate_string):
    """ from a string (normally passed through the url) this function checks
		that it matches one of the result type. If it fails it returns the default one.
	"""
    RESULT_TYPES = DJF_SPECS.result_types
Example #57
0
import requests
from bs4 import BeautifulSoup
import locale
import numpy as np
import re

### For the Tripadvisor page of Paris restaurants this script extract:
### -The name of each restaurant 
### -The type of cuisine of each restaurant (still a bit buggy)
### -The number of reviews in each language for each restaurant

locale.setlocale( locale.LC_ALL, 'en_US.UTF-8' )

lan_list = ["French", "English", "Italian", "Chinese", "Arabic", "Czech", "Danish", "Dutch", "German", "Greek", "Hebrew", "Hungarian", "Indonesian", "Japanese", "Korean", "Norwegian", "Polish", "Portuguese", "Russian", "Serbian", "Slovak", "Spanish", "Swedish", "Thai", "Turkish", "Vietnamese"]

lan_n = len(lan_list)

lan_l_recompile=[re.compile("French"), re.compile("English"), re.compile("Italian"), re.compile("Chinese"), re.compile("Arabic"), re.compile("Czech"), re.compile("Danish"), re.compile("Dutch"), re.compile("German"), re.compile("Greek"), re.compile("Hebrew"), re.compile("Hungarian"), re.compile("Indonesian"), re.compile("Japanese"), re.compile("Korean"), re.compile("Norwegian"), re.compile("Polish"), re.compile("Portuguese"), re.compile("Russian"), re.compile("Serbian"), re.compile("Slovak"), re.compile("Spanish"), re.compile("Swedish"), re.compile("Thai"), re.compile("Turkish"), re.compile("Vietnamese")] 

for i in range(0, 14760, 30):
	tmp_url = 'https://www.tripadvisor.com/Restaurants-g187147-oa'+str(i)+'-Paris_Ile_de_France.html#EATERY_OVERVIEW_BOX'
	r = requests.get(tmp_url)
	soup = BeautifulSoup(r.content, "lxml")

	for link in soup.find_all('a', class_="property_title"):
#    print(link.get('href'))
	    resto_url='https://www.tripadvisor.com'+link.get('href')
#    print resto_url
	    resto = requests.get(resto_url)
	    soup_resto = BeautifulSoup(resto.content, "lxml")
	    name = soup_resto.find_all("title")
Example #58
0
def main(initializer_module, heartbeat_stop_callback=None):
    """Entry point of the Ambari agent process.

    Parses command line options, configures logging, performs prestart
    checks, daemonizes (on non-Windows), then loops over the configured
    server hostnames until a connection is established or the agent is
    stopped.

    :param initializer_module: shared agent state; its init() is called once
        logging is configured, and its stop_event is passed to NetUtil while
        connecting to the server.
    :param heartbeat_stop_callback: not referenced in this function body.
    :return: the hostname of the Ambari server that was connected to, or
        None if no connection was established.
    """
    global config
    global home_dir

    parser = OptionParser()
    parser.add_option("-v",
                      "--verbose",
                      dest="verbose",
                      action="store_true",
                      help="verbose log output",
                      default=False)
    parser.add_option(
        "-e",
        "--expected-hostname",
        dest="expected_hostname",
        action="store",
        help=
        "expected hostname of current host. If hostname differs, agent will fail",
        default=None)
    parser.add_option("--home",
                      dest="home_dir",
                      action="store",
                      help="Home directory",
                      default="")
    (options, args) = parser.parse_args()

    expected_hostname = options.expected_hostname
    home_dir = options.home_dir

    logging_level = logging.DEBUG if options.verbose else logging.INFO

    # Configure every logger (main, alerts, scheduler) before anything else
    # runs, so initialization errors below are captured in the log files.
    setup_logging(logger, AmbariConfig.AmbariConfig.getLogFile(),
                  logging_level)
    global is_logger_setup
    is_logger_setup = True
    setup_logging(alerts_logger, AmbariConfig.AmbariConfig.getAlertsLogFile(),
                  logging_level)
    setup_logging(alerts_logger_2,
                  AmbariConfig.AmbariConfig.getAlertsLogFile(), logging_level)
    setup_logging(alerts_logger_global,
                  AmbariConfig.AmbariConfig.getAlertsLogFile(), logging_level)
    setup_logging(apscheduler_logger,
                  AmbariConfig.AmbariConfig.getAlertsLogFile(), logging_level)
    setup_logging(apscheduler_logger_global,
                  AmbariConfig.AmbariConfig.getAlertsLogFile(), logging_level)
    Logger.initialize_logger('resource_management',
                             logging_level=logging_level)
    #with Environment() as env:
    #  File("/abc")

    # init data, once loggers are setup to see exceptions/errors of initialization.
    initializer_module.init()

    if home_dir != "":
        # When running multiple Ambari Agents on this host for simulation, each one will use a unique home directory.
        Logger.info("Agent is using Home Dir: %s" % str(home_dir))

    # use the host's locale for numeric formatting
    try:
        locale.setlocale(locale.LC_ALL, '')
    except locale.Error as ex:
        logger.warning(
            "Cannot set locale for ambari-agent. Please check your systemwide locale settings. Failed due to: {0}."
            .format(str(ex)))

    default_cfg = {'agent': {'prefix': '/home/ambari'}}
    config.load(default_cfg)

    # 'stop'/'reset' subcommands are read from sys.argv directly, not from
    # the optparse result above.
    if (len(sys.argv) > 1) and sys.argv[1] == 'stop':
        stop_agent()

    if (len(sys.argv) > 2) and sys.argv[1] == 'reset':
        reset_agent(sys.argv)

    # Check for ambari configuration file.
    resolve_ambari_config()

    # Add syslog handler based on ambari config file
    add_syslog_handler(logger)

    # Starting data cleanup daemon
    data_cleaner = None
    if config.has_option('agent', 'data_cleanup_interval') and int(
            config.get('agent', 'data_cleanup_interval')) > 0:
        data_cleaner = DataCleaner(config)
        data_cleaner.start()

    perform_prestart_checks(expected_hostname)

    # Starting ping port listener
    try:
        #This acts as a single process machine-wide lock (albeit incomplete, since
        # we still need an extra file to track the Agent PID)
        ping_port_listener = PingPortListener(config)
    except Exception as ex:
        err_message = "Failed to start ping port listener of: " + str(ex)
        logger.error(err_message)
        sys.stderr.write(err_message)
        sys.exit(1)
    ping_port_listener.start()

    update_log_level(config)

    update_open_files_ulimit(config)

    if not config.use_system_proxy_setting():
        logger.info('Agent is configured to ignore system proxy settings')
        #reconfigure_urllib2_opener(ignore_system_proxy=True)

    if not OSCheck.get_os_family() == OSConst.WINSRV_FAMILY:
        daemonize()

    #
    # Iterate through the list of server hostnames and connect to the first active server
    #

    active_server = None
    server_hostnames = hostname.server_hostnames(config)

    connected = False
    stopped = False

    # Keep trying to connect to a server or bail out if ambari-agent was stopped
    while not connected and not stopped:
        for server_hostname in server_hostnames:
            server_url = config.get_api_url(server_hostname)
            try:
                server_ip = socket.gethostbyname(server_hostname)
                logger.info('Connecting to Ambari server at %s (%s)',
                            server_url, server_ip)
            except socket.error:
                # NOTE(review): logger.warn is deprecated on Python 3;
                # logger.warning is the preferred spelling.
                logger.warn(
                    "Unable to determine the IP address of the Ambari server '%s'",
                    server_hostname)

            # Wait until MAX_RETRIES to see if server is reachable
            netutil = NetUtil(config, initializer_module.stop_event)
            (retries, connected,
             stopped) = netutil.try_to_connect(server_url, MAX_RETRIES, logger)

            # if connected, launch controller
            if connected:
                logger.info('Connected to Ambari server %s', server_hostname)
                # Set the active server
                active_server = server_hostname
                # Launch Controller communication
                run_threads(initializer_module)

            #
            # If Ambari Agent connected to the server or
            # Ambari Agent was stopped using stop event
            # Clean up if not Windows OS
            #
            if connected or stopped:
                ExitHelper().exit()
                logger.info("finished")
                break
        pass  # for server_hostname in server_hostnames
    pass  # while not (connected or stopped)

    return active_server
def main():
    """Analyze an OpenAPC CSV file and enrich it with external metadata.

    Workflow:
      1. Parse command line arguments and configure logging, locale and
         encoding.
      2. Detect the CSV file's encoding, dialect and header.
      3. Map CSV columns to known OpenAPC column types via explicit command
         line indexes, header names and value heuristics (DOI / year / euro
         amount detection on the first data row).
      4. Run every data row through oat.process_row() and write the
         enriched table to 'out.csv'.

    Exits via sys.exit() on unrecoverable errors (conflicting options,
    unknown locale or encoding, unidentifiable mandatory columns).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("csv_file", help=ARG_HELP_STRINGS["csv_file"])
    parser.add_argument("-O",
                        "--offsetting_mode",
                        help=ARG_HELP_STRINGS["offsetting"])
    parser.add_argument("-b",
                        "--bypass-cert-verification",
                        action="store_true",
                        help=ARG_HELP_STRINGS["bypass"])
    parser.add_argument("-d",
                        "--offline_doaj",
                        help=ARG_HELP_STRINGS["offline_doaj"])
    parser.add_argument("-D",
                        "--offline_doaj_download",
                        help=ARG_HELP_STRINGS["offline_doaj_download"])
    parser.add_argument("-e", "--encoding", help=ARG_HELP_STRINGS["encoding"])
    parser.add_argument("-f",
                        "--force",
                        action="store_true",
                        help=ARG_HELP_STRINGS["force"])
    parser.add_argument("-i",
                        "--ignore-header",
                        action="store_true",
                        help=ARG_HELP_STRINGS["ignore_header"])
    parser.add_argument("-j",
                        "--force-header",
                        action="store_true",
                        help=ARG_HELP_STRINGS["force_header"])
    parser.add_argument("-l", "--locale", help=ARG_HELP_STRINGS["locale"])
    parser.add_argument("-u",
                        "--add-unknown-columns",
                        action="store_true",
                        help=ARG_HELP_STRINGS["unknown_columns"])
    parser.add_argument("-v",
                        "--verbose",
                        action="store_true",
                        help=ARG_HELP_STRINGS["verbose"])
    parser.add_argument("-o",
                        "--overwrite",
                        action="store_true",
                        help=ARG_HELP_STRINGS["overwrite"])
    parser.add_argument("-r",
                        "--round_monetary",
                        action="store_true",
                        help=ARG_HELP_STRINGS["round_monetary"])
    parser.add_argument("--no-crossref",
                        action="store_true",
                        help=ARG_HELP_STRINGS["no_crossref"])
    parser.add_argument("--no-pubmed",
                        action="store_true",
                        help=ARG_HELP_STRINGS["no_pubmed"])
    parser.add_argument("--no-doaj",
                        action="store_true",
                        help=ARG_HELP_STRINGS["no_doaj"])
    parser.add_argument("-institution",
                        "--institution_column",
                        type=int,
                        help=ARG_HELP_STRINGS["institution"])
    parser.add_argument("-period",
                        "--period_column",
                        type=int,
                        help=ARG_HELP_STRINGS["period"])
    parser.add_argument("-doi",
                        "--doi_column",
                        type=int,
                        help=ARG_HELP_STRINGS["doi"])
    parser.add_argument("-euro",
                        "--euro_column",
                        type=int,
                        help=ARG_HELP_STRINGS["euro"])
    parser.add_argument("-is_hybrid",
                        "--is_hybrid_column",
                        type=int,
                        help=ARG_HELP_STRINGS["is_hybrid"])
    parser.add_argument("-publisher",
                        "--publisher_column",
                        type=int,
                        help=ARG_HELP_STRINGS["publisher"])
    parser.add_argument("-journal_full_title",
                        "--journal_full_title_column",
                        type=int,
                        help=ARG_HELP_STRINGS["journal_full_title"])
    parser.add_argument("-issn",
                        "--issn_column",
                        type=int,
                        help=ARG_HELP_STRINGS["issn"])
    parser.add_argument("-url",
                        "--url_column",
                        type=int,
                        help=ARG_HELP_STRINGS["url"])
    parser.add_argument("-start", type=int, help=ARG_HELP_STRINGS["start"])
    parser.add_argument("-end", type=int, help=ARG_HELP_STRINGS["end"])
    parser.add_argument("-q",
                        "--quotemask",
                        default="tfftttttttttttttttt",
                        help=ARG_HELP_STRINGS["quotemask"])
    parser.add_argument("-n",
                        "--no-openapc-quote-rules",
                        help=ARG_HELP_STRINGS["no_openapc_quote_rules"],
                        action="store_true",
                        default=False)

    args = parser.parse_args()

    # Errors are printed immediately (plain handler) and also buffered, so
    # they can be replayed in one batch at the end of the run.
    handler = logging.StreamHandler(sys.stderr)
    handler.setFormatter(oat.ANSIColorFormatter())
    bufferedHandler = oat.BufferedErrorHandler(handler)
    bufferedHandler.setFormatter(oat.ANSIColorFormatter())
    logging.root.addHandler(handler)
    logging.root.addHandler(bufferedHandler)
    logging.root.setLevel(logging.INFO)

    if args.offline_doaj and args.offline_doaj_download:
        oat.print_r("Error: Either use the -d or the -D option, not both.")
        sys.exit()

    if args.locale:
        norm = locale.normalize(args.locale)
        if norm != args.locale:
            msg = "locale '{}' not found, normalised to '{}'".format(
                args.locale, norm)
            oat.print_y(msg)
        try:
            loc = locale.setlocale(locale.LC_ALL, norm)
            oat.print_g("Using locale " + loc)
        except locale.Error as loce:
            # Fixed: Python 3 exceptions have no ".message" attribute —
            # format the exception object itself.
            msg = "Setting locale to {} failed: {}".format(norm, loce)
            oat.print_r(msg)
            sys.exit()

    enc = None  # CSV file encoding
    if args.encoding:
        try:
            codec = codecs.lookup(args.encoding)
            msg = ("Encoding '{}' found in Python's codec collection " +
                   "as '{}'").format(args.encoding, codec.name)
            oat.print_g(msg)
            enc = args.encoding
        except LookupError:
            msg = ("Error: '" + args.encoding + "' not found Python's " +
                   "codec collection. Either look for a valid name here " +
                   "(https://docs.python.org/2/library/codecs.html#standard-" +
                   "encodings) or omit this argument to enable automated " +
                   "guessing.")
            oat.print_r(msg)
            sys.exit()

    result = oat.analyze_csv_file(args.csv_file, enc=enc)
    if result["success"]:
        csv_analysis = result["data"]
        print(csv_analysis)
    else:
        print(result["error_msg"])
        sys.exit()

    if enc is None:
        enc = csv_analysis.enc
    dialect = csv_analysis.dialect
    has_header = csv_analysis.has_header or args.force_header

    if enc is None:
        print("Error: No encoding given for CSV file and automated " +
              "detection failed. Please set the encoding manually via the " +
              "--enc argument")
        sys.exit()

    # The quotemask encodes per-column quoting as 't'/'f' characters.
    reduced = args.quotemask.replace("f", "").replace("t", "")
    if len(reduced) > 0:
        print("Error: A quotemask may only contain the letters 't' and " +
              "'f'!")
        sys.exit()
    mask = [x == "t" for x in args.quotemask]

    doaj_offline_analysis = None
    if args.offline_doaj:
        if os.path.isfile(args.offline_doaj):
            doaj_offline_analysis = oat.DOAJOfflineAnalysis(args.offline_doaj)
        else:
            oat.print_r("Error: " + args.offline_doaj + " does not seem "
                        "to be a file!")
            sys.exit()
    elif args.offline_doaj_download:
        if os.path.isfile(args.offline_doaj_download):
            oat.print_r("Error: Target file '" + args.offline_doaj_download +
                        "' already exists!")
            sys.exit()
        doaj_offline_analysis = oat.DOAJOfflineAnalysis(
            args.offline_doaj_download, download=True)

    csv_file = open(args.csv_file, "r", encoding=enc)
    reader = csv.reader(csv_file, dialect=dialect)

    first_row = next(reader)
    num_columns = len(first_row)
    print("\nCSV file has {} columns.".format(num_columns))

    # Rewind and recreate the reader so the analysis below sees every row.
    csv_file.seek(0)
    reader = csv.reader(csv_file, dialect=dialect)

    if args.overwrite:
        ow_strategy = CSVColumn.OW_ALWAYS
    else:
        ow_strategy = CSVColumn.OW_ASK

    openapc_column_map = OrderedDict([
        ("institution",
         CSVColumn("institution",
                   CSVColumn.MANDATORY,
                   args.institution_column,
                   overwrite=ow_strategy)),
        ("period",
         CSVColumn("period",
                   CSVColumn.MANDATORY,
                   args.period_column,
                   overwrite=ow_strategy)),
        ("euro",
         CSVColumn("euro",
                   CSVColumn.MANDATORY,
                   args.euro_column,
                   overwrite=ow_strategy)),
        ("doi",
         CSVColumn("doi",
                   CSVColumn.MANDATORY,
                   args.doi_column,
                   overwrite=ow_strategy)),
        ("is_hybrid",
         CSVColumn("is_hybrid",
                   CSVColumn.MANDATORY,
                   args.is_hybrid_column,
                   overwrite=ow_strategy)),
        ("publisher",
         CSVColumn("publisher",
                   CSVColumn.OPTIONAL,
                   args.publisher_column,
                   overwrite=ow_strategy)),
        ("journal_full_title",
         CSVColumn("journal_full_title",
                   CSVColumn.OPTIONAL,
                   args.journal_full_title_column,
                   overwrite=ow_strategy)),
        ("issn",
         CSVColumn("issn",
                   CSVColumn.OPTIONAL,
                   args.issn_column,
                   overwrite=ow_strategy)),
        ("issn_print",
         CSVColumn("issn_print", CSVColumn.NONE, None, overwrite=ow_strategy)),
        ("issn_electronic",
         CSVColumn("issn_electronic",
                   CSVColumn.NONE,
                   None,
                   overwrite=ow_strategy)),
        ("issn_l",
         CSVColumn("issn_l", CSVColumn.NONE, None, overwrite=ow_strategy)),
        ("license_ref",
         CSVColumn("license_ref", CSVColumn.NONE, None,
                   overwrite=ow_strategy)),
        ("indexed_in_crossref",
         CSVColumn("indexed_in_crossref",
                   CSVColumn.NONE,
                   None,
                   overwrite=ow_strategy)),
        ("pmid", CSVColumn("pmid", CSVColumn.NONE, None,
                           overwrite=ow_strategy)),
        ("pmcid",
         CSVColumn("pmcid", CSVColumn.NONE, None, overwrite=ow_strategy)),
        ("ut", CSVColumn("ut", CSVColumn.NONE, None, overwrite=ow_strategy)),
        ("url",
         CSVColumn("url",
                   CSVColumn.OPTIONAL,
                   args.url_column,
                   overwrite=ow_strategy)),
        ("doaj", CSVColumn("doaj", CSVColumn.NONE, None,
                           overwrite=ow_strategy))
    ])

    # Offsetting mode uses the same layout, but "euro" is not mandatory and
    # an extra "agreement" column is appended.
    offsetting_column_map = OrderedDict([
        ("institution",
         CSVColumn("institution",
                   CSVColumn.MANDATORY,
                   args.institution_column,
                   overwrite=ow_strategy)),
        ("period",
         CSVColumn("period",
                   CSVColumn.MANDATORY,
                   args.period_column,
                   overwrite=ow_strategy)),
        ("euro",
         CSVColumn("euro",
                   CSVColumn.NONE,
                   args.euro_column,
                   overwrite=ow_strategy)),
        ("doi",
         CSVColumn("doi",
                   CSVColumn.MANDATORY,
                   args.doi_column,
                   overwrite=ow_strategy)),
        ("is_hybrid",
         CSVColumn("is_hybrid",
                   CSVColumn.MANDATORY,
                   args.is_hybrid_column,
                   overwrite=ow_strategy)),
        ("publisher",
         CSVColumn("publisher",
                   CSVColumn.OPTIONAL,
                   args.publisher_column,
                   overwrite=ow_strategy)),
        ("journal_full_title",
         CSVColumn("journal_full_title",
                   CSVColumn.OPTIONAL,
                   args.journal_full_title_column,
                   overwrite=ow_strategy)),
        ("issn",
         CSVColumn("issn",
                   CSVColumn.OPTIONAL,
                   args.issn_column,
                   overwrite=ow_strategy)),
        ("issn_print",
         CSVColumn("issn_print", CSVColumn.NONE, None, overwrite=ow_strategy)),
        ("issn_electronic",
         CSVColumn("issn_electronic",
                   CSVColumn.NONE,
                   None,
                   overwrite=ow_strategy)),
        ("issn_l",
         CSVColumn("issn_l", CSVColumn.NONE, None, overwrite=ow_strategy)),
        ("license_ref",
         CSVColumn("license_ref", CSVColumn.NONE, None,
                   overwrite=ow_strategy)),
        ("indexed_in_crossref",
         CSVColumn("indexed_in_crossref",
                   CSVColumn.NONE,
                   None,
                   overwrite=ow_strategy)),
        ("pmid", CSVColumn("pmid", CSVColumn.NONE, None,
                           overwrite=ow_strategy)),
        ("pmcid",
         CSVColumn("pmcid", CSVColumn.NONE, None, overwrite=ow_strategy)),
        ("ut", CSVColumn("ut", CSVColumn.NONE, None, overwrite=ow_strategy)),
        ("url",
         CSVColumn("url",
                   CSVColumn.OPTIONAL,
                   args.url_column,
                   overwrite=ow_strategy)),
        ("doaj", CSVColumn("doaj", CSVColumn.NONE, None,
                           overwrite=ow_strategy)),
        ("agreement",
         CSVColumn("agreement", CSVColumn.NONE, None, overwrite=ow_strategy)),
    ])

    if args.offsetting_mode:
        column_map = offsetting_column_map
    else:
        column_map = openapc_column_map

    header = None
    if has_header:
        for row in reader:
            if not row:  # Skip empty lines
                continue
            header = row  # First non-empty row should be the header
            if args.ignore_header:
                print("Skipping header analysis due to command line argument.")
                break
            else:
                print("\n    *** Analyzing CSV header ***\n")
            for (index, item) in enumerate(header):
                column_type = oat.get_column_type_from_whitelist(item)
                if column_type is not None and column_map[
                        column_type].index is None:
                    column_map[column_type].index = index
                    column_map[column_type].column_name = item
                    found_msg = ("Found column named '{}' at index {}, " +
                                 "assuming this to be the {} column.")
                    print(found_msg.format(item, index, column_type))
            break

    print("\n    *** Starting heuristical analysis ***\n")
    for row in reader:
        if not row:  # Skip empty lines
            # We analyze the first non-empty line, a possible header should
            # have been processed by now.
            continue
        column_candidates = {"doi": [], "period": [], "euro": []}
        found_msg = "The entry in column {} looks like a potential {}: {}"
        for (index, entry) in enumerate(row):
            if index in [csvcolumn.index for csvcolumn in column_map.values()]:
                # Skip columns already assigned
                continue
            entry = entry.strip()
            # Search for a DOI
            if column_map['doi'].index is None:
                if oat.DOI_RE.match(entry):
                    column_id = str(index)
                    # identify column either numerically or by column header
                    if header:
                        column_id += " ('" + header[index] + "')"
                    print(found_msg.format(column_id, "DOI", entry))
                    column_candidates['doi'].append(index)
                    continue
            # Search for a potential year string
            if column_map['period'].index is None:
                try:
                    maybe_period = int(entry)
                    now = datetime.date.today().year
                    # Should be a wide enough margin
                    if maybe_period >= 2000 and maybe_period <= now + 2:
                        column_id = str(index)
                        if header:
                            column_id += " ('" + header[index] + "')"
                        print(found_msg.format(column_id, "year", entry))
                        column_candidates['period'].append(index)
                        continue
                except ValueError:
                    pass
            # Search for a potential monetary amount
            if column_map['euro'].index is None:
                try:
                    maybe_euro = locale.atof(entry)
                    if maybe_euro >= 10 and maybe_euro <= 10000:
                        column_id = str(index)
                        if header:
                            column_id += " ('" + header[index] + "')"
                        print(found_msg.format(column_id, "euro amount",
                                               entry))
                        column_candidates['euro'].append(index)
                        continue
                except ValueError:
                    pass
        for column_type, candidates in column_candidates.items():
            if column_map[column_type].index is not None:
                continue
            if len(candidates) > 1:
                print("Could not reliably identify the '" + column_type +
                      "' column - more than one possible candiate!")
            elif len(candidates) < 1:
                print("No candidate found for column '" + column_type + "'!")
            else:
                index = candidates.pop()
                column_map[column_type].index = index
                if header:
                    column_id = header[index]
                    column_map[column_type].column_name = column_id
                else:
                    column_id = index
                msg = "Assuming column '{}' to be the '{}' column."
                print(msg.format(column_id, column_type))
        break

    # Wrap up: Check if there any mandatory column types left which have not
    # yet been identified - we cannot continue in that case (unless forced).
    unassigned = [
        x for x in iter(column_map.items())
        if x[1].requirement == CSVColumn.MANDATORY and x[1].index is None
    ]
    if unassigned:
        for item in unassigned:
            print("The {} column is still unidentified.".format(item[0]))
        if header:
            print("The CSV header is:\n" + dialect.delimiter.join(header))
        if not args.force:
            print("ERROR: We cannot continue because not all mandatory " +
                  "column types in the CSV file could be automatically " +
                  "identified. There are 2 ways to fix this:")
            if not header:
                print("1) Add a header row to your file and identify the " +
                      "column(s) by assigning them an appropiate column name.")
            else:
                print("1) Identify the missing column(s) by assigning them " +
                      "a different column name in the CSV header (You can " +
                      "use the column name(s) mentioned in the message above)")
            print("2) Use command line parameters when calling this script " +
                  "to identify the missing columns (use -h for help) ")
            sys.exit()
        else:
            print("WARNING: Not all mandatory column types in the CSV file " +
                  "could be automatically identified - forced to continue.")

    print("\n    *** CSV file analysis summary ***\n")

    index_dict = {csvc.index: csvc for csvc in column_map.values()}

    for index in range(num_columns):
        column_name = ""
        if header:
            column_name = header[index]
        if index in index_dict:
            column = index_dict[index]
            msg = u"column number {} ({}) is the {} column '{}'".format(
                index, column_name, column.requirement, column.column_type)
            if column.requirement in [CSVColumn.MANDATORY, CSVColumn.OPTIONAL]:
                oat.print_g(msg)
            else:
                oat.print_b(msg)
        else:
            if args.add_unknown_columns:
                msg = (
                    u"column number {} ({}) is an unknown column, it will be "
                    + "appended to the generated CSV file")
                oat.print_y(msg.format(index, column_name))
                if not column_name:
                    # Use a generic name
                    column_name = "unknown"
                while column_name in column_map.keys():
                    # TODO: Replace by a numerical, increasing suffix
                    column_name += "_"
                column_map[column_name] = CSVColumn(column_name,
                                                    CSVColumn.NONE, index)
            else:
                msg = (
                    u"column number {} ({}) is an unknown column, it will be "
                    + "ignored")
                oat.print_y(msg.format(index, column_name))

    print()
    for column in column_map.values():
        if column.index is None:
            msg = "The {} column '{}' could not be identified."
            print(msg.format(column.requirement, column.column_type))

    # Check for unassigned optional column types. We can continue but should
    # issue a warning as all entries will need a valid DOI in this case.
    # Fixed: the previous filter() call used a two-argument lambda (TypeError
    # on the (key, value) tuples if iterated) and a lazy filter object is
    # always truthy, so the warning used to fire unconditionally.
    unassigned = [
        csvc for csvc in column_map.values()
        if csvc.requirement == CSVColumn.OPTIONAL and csvc.index is None
    ]
    if unassigned:
        print("\nWARNING: Not all optional column types could be " +
              "identified. Metadata aggregation is still possible, but " +
              "every entry in the CSV file will need a valid DOI.")

    start = input("\nStart metadata aggregation? (y/n):")
    while start not in ["y", "n"]:
        start = input("Please type 'y' or 'n':")
    if start == "n":
        sys.exit()

    print("\n    *** Starting metadata aggregation ***\n")

    enriched_content = []

    csv_file.seek(0)
    reader = csv.reader(csv_file, dialect=dialect)
    header_processed = False
    row_num = 0

    for row in reader:
        row_num += 1
        if not row:
            continue  # skip empty lines
        if not header_processed:
            header_processed = True
            enriched_content.append(list(column_map.keys()))
            if has_header:
                # If the CSV file has a header, we are currently there - skip it
                # to get to the first data row
                continue
        if args.start and args.start > row_num:
            continue
        if args.end and args.end < row_num:
            continue
        print("---Processing line number " + str(row_num) + "---")
        enriched_row = oat.process_row(row, row_num, column_map, num_columns,
                                       args.no_crossref, args.no_pubmed,
                                       args.no_doaj, doaj_offline_analysis,
                                       args.round_monetary,
                                       args.offsetting_mode)
        enriched_content.append(enriched_row)

    csv_file.close()

    with open('out.csv', 'w') as out:
        writer = oat.OpenAPCUnicodeWriter(out, mask,
                                          not args.no_openapc_quote_rules,
                                          True, True)
        writer.write_rows(enriched_content)

    if not bufferedHandler.buffer:
        oat.print_g("Metadata enrichment successful, no errors occured")
    else:
        oat.print_r("There were errors during the enrichment process:\n")
    # closing will implicitly flush the handler and print any buffered
    # messages to stderr
    bufferedHandler.close()
Example #60
0
 def tearDown(self):
     """Restore the LC_TIME locale saved before the test, then run the
     parent class teardown."""
     # self.old_locale is expected to be set by the matching setUp
     # (not visible here) — TODO confirm.
     locale.setlocale(locale.LC_TIME, self.old_locale)
     super(IPNLocaleTest, self).tearDown()