Example No. 1
def sane_unicode_and_exception_handling(script):
    """
    Exit 1 on failure, and 2 on error.  Print the traceback assuming UTF-8.
    """
    # Simulates python3's defaulting to utf-8 output so we don't get confusing
    # `UnicodeEncodeError`s when printing unicode characters:
    from kitchen.text.converters import getwriter, exception_to_bytes, to_bytes
    if sys.stdout.encoding is None:
        sys.stdout = getwriter('utf8')(sys.stdout)
    if sys.stderr.encoding is None:
        sys.stderr = getwriter('utf8')(sys.stderr)

    try:
        yield
    except Exception as e:  # pylint: disable=W0703
        error_message = exception_to_bytes(e)
        if not error_message and isinstance(e, AssertionError):
            error_message = traceback.extract_tb(sys.exc_info()[2])[-1][3]
        sys.stdout.write("FAIL: %s: %s: %s\n" %
                         (script, type(e).__name__, error_message))

        # This is a hack to allow printing exceptions that have unicode messages
        # attached to them.  The default behaviour of Python 2.7 is to replace
        # unicode characters with \x023-like backslash escapes.  Instead we
        # format them as utf-8 bytes.
        #
        # It's not thread-safe, but will only be called at the end of execution:
        traceback._some_str = to_bytes  # pylint: disable=protected-access
        traceback.print_exc(file=sys.stderr)

        # 1 is failure and 2 is error
        if isinstance(e, (stbt.UITestFailure, AssertionError)):
            sys.exit(1)  # Failure
        else:
            sys.exit(2)  # Error
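
Because the function body yields exactly once and turns exceptions into exit codes, it is evidently meant to be driven as a context manager. A minimal usage sketch follows; the contextmanager wrapping and the script name are assumptions, not part of the original snippet:

from contextlib import contextmanager

# Hedged sketch: run a test body under the handler above so any exception is
# printed as UTF-8 and mapped to exit code 1 (failure) or 2 (error).
safe_run = contextmanager(sane_unicode_and_exception_handling)

with safe_run('tests/example_test.py'):  # illustrative script name
    assert 1 + 1 == 2, 'test assertions go here'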
Example No. 2
def main(args):
    UTF8Writer = getwriter('utf8')
    sys.stdout = UTF8Writer(sys.stdout)

    LSimilarityVars.per_metric_optimal_values = StaticValues.MetricOptimalValues[
        args["-e"].lower()]
    dataset_path = args['-d'].split(',')

    evaluator = rc.Evaluator(args['--ml'], args['--sort'], args['--stemming'],
                             args['--canonical'], args['--permuted'],
                             args['-e'])

    if args['--buildDataset']:
        evaluator.build_dataset()
        sys.exit(0)

    fpath_ds = getRelativePathtoWorking(dataset_path[0])
    if os.path.isfile(fpath_ds):
        evaluator.initialize(fpath_ds, args['--ev'], args['--jobs'],
                             args['--accuracyresults'])

        if int(args['--test']):
            evaluator.test_cases(fpath_ds, int(args['--test']))
        elif args['--tuning']:
            evaluator.hyperparamTuning(fpath_ds)
        elif args['--cmp']:
            evaluator.print_false_posneg(dataset_path)
        elif args['--optimalThres']:
            evaluator.evaluate_metrics_with_various_thres(fpath_ds)
        elif args['--optimalSortingThres']:
            evaluator.evaluate_sorting_with_various_thres(fpath_ds)
        else:
            evaluator.evaluate_metrics(fpath_ds, args['--fs'], args['-f'])
    else:
        print("No file {0} exists!!!\n".format(fpath_ds))
Example No. 3
def run_logprint(api, query, formatter, follow=False, interval=0, latency=2, output=None, header=None):
    if output is None:
        output = getwriter('utf8')(sys.stdout)

    if follow:
        assert query.limit is None

        try:
            while True:
                result = run_logprint(api, query, formatter, follow=False, output=output)
                new_range = SearchRange(from_time=result.range_to,
                        to_time=result.range_to.replace(seconds=+1))
                query = query.copy_with_range(new_range)

                time.sleep(interval / 1000.0)
        except KeyboardInterrupt:
            print("\nInterrupted follow mode. Exiting...")

    else:
        result = api.search(query, fetch_all=True)
        formatted_msgs = [formatter(m) for m in result.messages]
        formatted_msgs.reverse()

        for msg in formatted_msgs:
            print(msg, file=output)

        return result
Example No. 4
    def do_play(self, xid):
        if not xid:
            self.help_play()
            return
        print "Fetching Guardian cryptic crossword: ", xid
        UTF8Writer = getwriter("utf8")
        nullout = UTF8Writer(io.open(os.devnull, "wb"))
        bkp_stdout = sys.stdout
        sys.stdout = nullout

        xwd = get_crossword(xid, format="etree")
        self.xid = xid

        sys.stdout = bkp_stdout
        nullout.close()
        self.xwd = xwd

        if xwd is None:
            self.print_warn("Error 404: Invalid id")
            return

        print "You are playing: ", xwd.find("Title").attrib["v"]
        print "Crossword copyright: ", xwd.find("Copyright").attrib["v"]
        print "Type `help` to see available commands."
        self.sol["w"] = int(xwd.find("Width").attrib["v"])
        self.sol["h"] = int(xwd.find("Height").attrib["v"])
        self.sol["all"] = xwd.find("Allanswer").attrib["v"]
        for i in range(self.sol["h"]):
            self.sol["grid"].append(list(self.sol["all"][15 * i : 15 * (i + 1)]))
        for x in xwd.find("across"):
            clue_attrib = dict(x.attrib)
            self.cn_rc[int(clue_attrib["cn"])] = (
                (int(clue_attrib["n"]) - 1) / self.sol["w"],
                (int(clue_attrib["n"]) - 1) % self.sol["w"],
            )
            self.cn_rc[
                ((int(clue_attrib["n"]) - 1) / self.sol["w"], (int(clue_attrib["n"]) - 1) % self.sol["w"])
            ] = int(clue_attrib["cn"])
            self.sol["a"][x.tag] = clue_attrib
        for x in xwd.find("down"):
            clue_attrib = dict(x.attrib)
            self.cn_rc[int(clue_attrib["cn"])] = (
                (int(clue_attrib["n"]) - 1) / self.sol["w"],
                (int(clue_attrib["n"]) - 1) % self.sol["w"],
            )
            self.cn_rc[
                ((int(clue_attrib["n"]) - 1) / self.sol["w"], (int(clue_attrib["n"]) - 1) % self.sol["w"])
            ] = int(clue_attrib["cn"])
            self.sol["d"][x.tag] = clue_attrib

        for i in range(self.sol["h"]):
            att_str = []
            for c in list(self.sol["grid"][i]):
                if c == "-":
                    att_str.append(u"█")
                else:
                    att_str.append(u" ")
            self.attempt.append(att_str)
Example No. 5
def main():
    args = parse_args()

    # Setup stdout
    encoding = locale.getpreferredencoding()
    writer = getwriter(encoding)
    sys.stdout = writer(sys.stdout)

    args.query = to_unicode(args.query)

    goodreads_key = api_key
    goodreads_secret = api_secret

    grc = client.GoodreadsClient(goodreads_key, goodreads_secret)

    books = get_books(grc, args)
    AR_sorted = sorted(books, key=lambda b: float(b.average_rating))
    NTR_sorted = sorted(books, key=lambda b: float(b.text_reviews_count))
    max_AR = float(AR_sorted[-1].average_rating)
    max_NTR = float(NTR_sorted[-1].text_reviews_count)

    Books = []
    totaux = []
    for book in books:
        d = {
            star_label: int(star_number)
            for star_label, star_number in (
                tuple(rating.split(':'))
                for rating in book.rating_dist.split('|'))
        }
        totaux.append(float(d.pop('total', 'inf')))
    TR_sorted = sorted(totaux)
    max_TR = TR_sorted[-1]

    for book in books:
        b = Book(book, max_AR, max_NTR, max_TR)
        Books.append(b)

    if not os.path.isdir("results"):
        os.mkdir("results")
    result_file = os.path.join("results", "results.txt")
    with open(result_file, "w") as f:
        old, sys.stdout = sys.stdout, f
        print(
            "List of results, sorted by higher fitness:\n<Book title> : <Book fitness>"
        )
        print("-" * 56)
        for book in sorted(Books, key=lambda b: b.fitnessScore, reverse=True):
            print(book)
    sys.stdout = old  # restore stdout
    print(
        "Find your results in 'results/results.txt', relative to the script's working directory"
    )
Example No. 6
def main(args):
    UTF8Writer = getwriter('utf8')
    sys.stdout = UTF8Writer(sys.stdout)

    dataset_path = args['-d']

    evaluator = Evaluator(args['--permuted'], args['--stemming'], args['--sort'],
                          args['--print'])
    full_dataset_path = evaluator.getTMabsPath(dataset_path)

    if os.path.isfile(full_dataset_path):
        evaluator.initialize(full_dataset_path, args['--ev'],
                             args['--accuracyresults'])
        if args['--print']:
            sys.exit(0)

        evaluator.evaluate_metrics(full_dataset_path)
    else:
        print("No file {0} exists!!!\n".format(full_dataset_path))
Example No. 7
    def test_utf8_writer(self):
        writer = converters.getwriter('utf-8')
        io = writer(self.io)
        io.write(self.u_japanese + u'\n')
        io.seek(0)
        result = io.read().strip()
        tools.eq_(result, self.utf8_japanese)

        io.seek(0)
        io.truncate(0)
        io.write(self.euc_jp_japanese + '\n')
        io.seek(0)
        result = io.read().strip()
        tools.eq_(result, self.euc_jp_japanese)

        io.seek(0)
        io.truncate(0)
        io.write(self.utf8_japanese + '\n')
        io.seek(0)
        result = io.read().strip()
        tools.eq_(result, self.utf8_japanese)
Example No. 8
    def test_utf8_writer(self):
        writer = converters.getwriter('utf-8')
        io = writer(self.io)
        io.write(self.u_japanese + '\n')
        io.seek(0)
        result = io.read().strip()
        tools.eq_(result, self.utf8_japanese)

        io.seek(0)
        io.truncate(0)
        io.write(self.euc_jp_japanese + b'\n')
        io.seek(0)
        result = io.read().strip()
        tools.eq_(result, self.euc_jp_japanese)

        io.seek(0)
        io.truncate(0)
        io.write(self.utf8_japanese + b'\n')
        io.seek(0)
        result = io.read().strip()
        tools.eq_(result, self.utf8_japanese)
Example No. 9
# -*- coding: utf-8 -*-

from suds.client import Client
import base64
import logging
import hashlib
import sys
from kitchen.text.converters import getwriter
import cPickle as pickle

UTF8Writer = getwriter("utf8")
sys.stdout = UTF8Writer(sys.stdout)

url = "PRIVATE"
webapiKey = "PRIVATE"
userLogin = "******"
userHashPassword = base64.b64encode(hashlib.sha256(b"PRIVATE").digest())
countryCode = 1
countryId = 1

logging.getLogger("suds.client").setLevel(logging.CRITICAL)
client = Client("https://webapi.allegro.pl/service.php?wsdl")
service = client.service
versions = {}

for row in service.doQueryAllSysStatus(**{"countryId": countryId, "webapiKey": webapiKey}).item:
    versions[row.countryId] = row

sessionId = service.doLoginEnc(
    **{
        "userLogin": userLogin,
Example No. 10
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

# Inspired from: https://github.com/redhat-openstack/khaleesi/blob/master/plugins/callbacks/human_log.py
# Further improved support Ansible 2.0
from __future__ import (absolute_import, division, print_function)
import sys

# Unicode workarounds based on https://pythonhosted.org/kitchen/unicode-frustrations.html
from kitchen.text.converters import getwriter

encoding = 'utf8'
sys.stdout = getwriter(encoding)(sys.stdout)
sys.stderr = getwriter(encoding)(sys.stderr)

__metaclass__ = type

try:
    import simplejson as json
except ImportError:
    import json

# Fields to reformat output for
FIELDS = ['cmd', 'command', 'start', 'end', 'delta', 'msg', 'stdout',
          'stderr', 'results']


class CallbackModule(object):
Example No. 11
    def test_error_handlers(self):
        '''Test setting alternate error handlers'''
        writer = converters.getwriter('latin1')
        io = writer(self.io, errors='strict')
        tools.assert_raises(UnicodeEncodeError, io.write, self.u_japanese)
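
For contrast, a hedged sketch of the non-strict path, reusing the same test fixtures (assumed to behave as above): with errors='replace' the latin-1 writer substitutes '?' for unencodable characters instead of raising.

    def test_error_handlers_replace(self):
        '''Sketch, not from the original suite: 'replace' swallows the error'''
        writer = converters.getwriter('latin1')
        io = writer(self.io, errors='replace')
        io.write(self.u_japanese)       # no UnicodeEncodeError is raised
        io.seek(0)
        tools.ok_('?' in io.read())     # unencodable characters became '?'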
Example No. 12
	def entry(self, alch):

		arquivo = 'alch.txt'
		sourcepath = str(os.getcwd())
		path = os.path.join(sourcepath, arquivo)
		globalpath = os.path.join(sourcepath, 'static', 'global')

		demo_text = alch

		UTF8Writer = getwriter('utf8')

		orig_stdout = sys.stdout
		f = file(path, 'w')
		sys.stdout = f
		sys.stdout = UTF8Writer(sys.stdout)

		# Create the AlchemyAPI Object


		response = alchemyapi.combined('text', demo_text)

		if response['status'] == 'OK':


			print('## Keywords ##')
			for keyword in response['keywords']:
				print(keyword['text'], ' : ', keyword['relevance'])
			print('')

			print('## Concepts ##')
			for concept in response['concepts']:
				print(concept['text'], ' : ', concept['relevance'])
			print('')

			print('## Entities ##')
			for entity in response['entities']:
				print(entity['text'], ' : ', entity['type'], ', ', entity['relevance'])
			print(' ')

		else:
			print('Error in combined call: ', response['statusInfo'])

		print('')
		print('')

		sys.stdout = orig_stdout
		f.close()

		f = open(path, 'r')

		content1 = ''
		for line in f:
			content1 = content1 + line + "<br>"

		csvstr = content1.replace('## Keywords ##', '#1#')\
			.replace('## Entities ##', '#3#').replace('## Concepts ##', '#2#')

		csvstr = csvstr.replace(':', ',').replace('.', ',').split('<br>')
		x = 1; csvstr1='';csvstr2='';csvstr3=''
		for line in csvstr:
			if line not in ('\t', '\n', '\r', ' ', ''):
				if '#2#' in line:
					x += 1
				if '#3#' in line:
					x += 1
				if x == 3:
					csvstr3 = csvstr3 + line
				if x == 2:
					csvstr2 = csvstr2 + line
				if x == 1:
					csvstr1 = csvstr1 + line


			csvstr1 = csvstr1.replace('#1#', 'Holding,0,Market Value')
			csvstr2 = csvstr2.replace('#2#', 'Holding,0,Market Value')
			csvstr3 = csvstr3.replace('#3#', 'Holding,0,Market Value').replace('0,M', '1,M',).replace('0,', '')

			fileout = open(os.path.join(globalpath, 'treemap1.csv'), 'w')
			fileout.write(csvstr1)
			fileout = open(os.path.join(globalpath, 'treemap2.csv'), 'w')
			fileout.write(csvstr2)
			fileout = open(os.path.join(globalpath, 'treemap3.csv'), 'w')
			fileout.writelines([item for item in csvstr3[:-2]])





		return content1
Example No. 13
from kitchen.i18n import get_translation_object

if __name__ == "__main__":
    # Setup gettext driven translations but use the kitchen functions so
    # we don't have the mismatched bytes-unicode issues.
    translations = get_translation_object('example')
    # We use _() for marking strings that we operate on as unicode
    # This is pretty much everything
    _ = translations.ugettext
    # And b_() for marking strings that we operate on as bytes.
    # This is limited to exceptions
    b_ = translations.lgettext

    # Setup stdout
    encoding = locale.getpreferredencoding()
    Writer = getwriter(encoding)
    sys.stdout = Writer(sys.stdout)

    # Load data.  Format is filename\0description
    # description should be utf-8 but filename can be any legal filename
    # on the filesystem
    # Sample datafile.txt:
    #   /etc/shells\x00Shells available on caf\xc3\xa9.lan
    #   /var/tmp/file\xff\x00File with non-utf8 data in the filename
    #
    # And to create /var/tmp/file\xff (under bash or zsh) do:
    #   echo 'Some data' > /var/tmp/file$'\377'
    datafile = open('datafile.txt', 'r')
    data = {}
    for line in datafile:
        # We're going to keep filename as bytes because we will need the
Example No. 14
def jsonToCsv(fileName, separator):
    theStr = ''
    with open('%s/%s.json' % (os.getcwd(), fileName), 'rb') as f:
        theJson = json.loads(f.read())

    methodList = [{
        'about_me': 'string'
    }, {
        'accept_rate': 'integer'
    }, {
        'account_id': 'integer'
    }, {
        'age': 'integer'
    }, {
        'answer_count': 'integer'
    }, {
        'badge_counts': 'badge_count'
    }, {
        'creation_date': 'date'
    }, {
        'display_name': 'string'
    }, {
        'down_vote_count': 'integer'
    }, {
        'is_employee': 'boolean'
    }, {
        'last_access_date': 'date'
    }, {
        'last_modified_date': 'date'
    }, {
        'link': 'string'
    }, {
        'location': 'string'
    }, {
        'profile_image': 'string'
    }, {
        'question_count': 'integer'
    }, {
        'reputation': 'integer'
    }, {
        'reputation_change_day': 'integer'
    }, {
        'reputation_change_month': 'integer'
    }, {
        'reputation_change_quarter': 'integer'
    }, {
        'reputation_change_week': 'integer'
    }, {
        'reputation_change_year': 'integer'
    }, {
        'timed_penalty_date': 'date'
    }, {
        'up_vote_count': 'integer'
    }, {
        'user_id': 'integer'
    }, {
        'user_type': 'string'
    }, {
        'view_count': 'integer'
    }, {
        'website_url': 'string'
    }]
    for method in methodList:
        for key in method:
            theStr += str(key) + separator
    theStr += '\n'
    for user in theJson:
        for method in methodList:
            for key in method:
                try:
                    theStr += user[str(key)]
                except TypeError:
                    theStr += str(user[str(key)])
                except KeyError:
                    theStr += 'No ' + str(key)
                except UnicodeError:
                    theStr += 'Unicode Error'
            theStr += separator
        theStr += '\n'
    UTF8Writer = getwriter('utf8')

    with open('%s/%s.csv' % (os.getcwd(), fileName), 'wb') as f:
        writer = UTF8Writer(f)
        writer.write(theStr)
Example No. 15
    def __init__(self, stdin, stdout, stderr):
        self.stdin = stdin
        self.stdout = getwriter('utf8')(stdout)
        self.stderr = stderr
Example No. 16
    def printText(self, raw_text):
    
        color_list = [
            'black',
            'dark red',
            'dark green',
            'brown',
            'dark blue',
            'dark magenta',
            'dark cyan',
            'light gray',
            'dark gray',
            'light red',
            'light green',
            'yellow',
            'light blue',
            'light magenta',
            'light cyan',
            'white'
        ]

        # Ugly, find a way to match VT100 plus the string using regex!
        ansi_regex = '(\x1b\[|\x9b)[^@-_]*[@-_]|\x1b[@-_]'
        ansi_escape = re.compile(ansi_regex, flags=re.IGNORECASE)

        formated_text = []
        if ansi_escape.findall(raw_text):

            UTF8Writer = getwriter('utf8')
            raw_text = UTF8Writer(raw_text)

            # This is dumb!, prone to error, find a better way!
            for at in raw_text.split("\x1b["):
                try:
                    attr, text = at.split("m",1)
                except:
                    attr = '0'
                    text = at.split("m",1)

                list_attr = []
                for i in attr.split(';'):
                    i = re.sub("[^0-9]", "", i)
                    i = i.lstrip('0')
                    if i == '':
                        i = '0'
                    list_attr.append(int(i))

                list_attr.sort()
                fg = -1
                bg = -1

                for elem in list_attr:
                    if elem <= 29:
                        pass
                    elif elem <= 37:
                        fg = elem - 30
                    elif elem <= 47:
                        bg = elem - 40
                    elif elem <= 94:
                        fg = fg + 8
                    elif elem >= 100 and elem <= 104:
                        bg = bg + 8

                if color_list[fg]:
                    fgcolor = color_list[fg]

                if color_list[bg]:
                    bgcolor = color_list[bg]

                if fg < 0:
                    fgcolor = 'white'

                if bg < 0:
                    bgcolor = 'black'

                if list_attr == [0]:
                    fgcolor = 'black'
                    bgcolor = 'white'

                if fgcolor == 'black':
                    fgcolor = 'white'

                if fgcolor == 'light gray':
                    fgcolor = 'dark gray'

                if bgcolor == 'white':
                    bgcolor = 'black'

                if 'dark' in fgcolor:
                    fgcolor = fgcolor.replace('dark', 'light')

                if fgcolor not in color_list:
                    fgcolor = 'white'

                if bgcolor not in color_list:
                    fgcolor = 'black'

                if not text:
                    # 0m is VT100 reset code
                    if at == '0m':
                        continue

                    fgcolor = 'white'
                    bgcolor = 'black'
                    text = at

                formated_text.append((urwid.AttrSpec(fgcolor, bgcolor), text))
        else:
            formated_text.append((urwid.AttrSpec('white', 'black'), raw_text))
    
        return formated_text
                
Example No. 17
def configure_unicode(encoding='utf-8'):
    """Configure `sys.stdout` and `sys.stderr` to be in Unicode (Do nothing if Python 3)."""
    sys.stdout = getwriter(encoding)(sys.stdout)
    sys.stderr = getwriter(encoding)(sys.stderr)
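
A minimal usage sketch under the same Python 2 assumptions: call it once at startup, after which printing unicode no longer raises UnicodeEncodeError when stdout is a pipe with no declared encoding.

configure_unicode('utf-8')
print(u'caf\xe9')  # encoded to UTF-8 bytes by the wrapped stdout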
Example No. 18
    print(book.title + " @ " + book.url)
    for c in book.chapters:
        print(c.url)
        for l in c.summary:
            print('   * ' + l)

    for v in book.chapters[0].verses:
        print(str(v.number) + ": " + v.text)

    bible.addBook(book)
    return bible


def saveToFile(fname, xml):
    f = open(fname, "w")
    f.write(xml)
    f.close()


if __name__ == '__main__':
    UTF8Writer = getwriter('utf8')
    sys.stdout = UTF8Writer(sys.stdout)

    session = requests.Session()
    #   bible = testParseBook(session)
    #   fname = 'genezis.xml'
    bible = parseBible(session, base_url)
    fname = 'revufo.xml'
    zefaniaDom = exportToZefaniaXML(bible)
    saveToFile(fname, tostring(zefaniaDom, 'UTF-8'))
Example No. 19
"""
from __future__ import unicode_literals, print_function

import os
import sys
import datetime
import dotenv
from kitchen.text.converters import getwriter

# Set default encoding for the entire app
reload(sys)
sys.setdefaultencoding('UTF8')
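# (reload(sys) is needed because site.py deletes setdefaultencoding at
# interpreter startup; this hack only exists on Python 2.)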

# very important when printing unicode strings
sys.stdout = getwriter('utf8')(sys.stdout)
sys.stderr = getwriter('utf8')(sys.stderr)

SRC_DIR = os.path.dirname(os.path.realpath(__file__))
SHOUTIT_ENV = os.environ.get('SHOUTIT_ENV', 'local')

# Local or Dev or Prod
LOCAL = SHOUTIT_ENV == 'local'

# Read env variables from .env file based on `SHOUTIT_ENV`
env_file = os.path.join(SRC_DIR, 'configs', SHOUTIT_ENV + '.env')
dotenv.read_dotenv(env_file)


def info(*args):
    _now = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
Example No. 20
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import sys
from kitchen.text.converters import getwriter

utf8writer = getwriter('utf8')
sys.stdout = utf8writer(sys.stdout)
print u'café'
print 'café'
Example No. 21
def main():
	#initialize witica

	signal.signal(signal.SIGINT, signal_handler) #abort on CTRL-C

	UTF8Writer = getwriter('utf8')
	sys.stdout = UTF8Writer(sys.stdout)

	extractor.register_default_extractors()

	target.register("WebTarget", web.WebTarget)
	target.register("StaticHtmlTarget", statichtml.StaticHtmlTarget)

	parser = argparse.ArgumentParser(description="Reads contents from a source, converts them and publishes to one or more targets.")
	parser.add_argument('-v', '--version', action='version', version='%(prog)s ' + VERSION)

	subparsers = parser.add_subparsers(title='sub-commands', help='sub-commands')

	#init command parser
	parser_init = subparsers.add_parser('init', help='inits a source with an example web site (WARNING: modifies the current working dir)')
	parser_init.add_argument('-V', '--verbose', action='store_true', help="also show info messages and debugging info")
	parser_init.set_defaults(func=init_command)

	#upgrade command parser
	parser_upgrade = subparsers.add_parser('upgrade', help='upgrades targets in the source to the newest version of witica.js (WARNING: modifies the current working dir)')
	parser_upgrade.add_argument('-V', '--verbose', action='store_true', help="also show info messages and debugging info")
	parser_upgrade.add_argument('-f', '--force', action='store_true', help="don't ask before overwriting files")
	parser_upgrade.add_argument('-t', '--targets', nargs='+', help="list of ids of targets that should be upgraded")
	parser_upgrade.set_defaults(func=upgrade_command)

	#update command parser
	parser_update = subparsers.add_parser('update', help='fetch changes and update targets')
	parser_update.add_argument('-V', '--verbose', action='store_true', help="also show info messages and debugging info")
	parser_update.add_argument('-s', '--source', help="the source configuration file to use")
	parser_update.add_argument('-d', '--deamon', action='store_true', help="keep running in background and process incoming events")
	parser_update.add_argument('-t', '--targets', nargs='+', help="list of ids of targets that should be used for the conversion")
	parser_update.set_defaults(func=update_command)

	#rebuild command parser
	parser_rebuild = subparsers.add_parser('rebuild', help='update single items or indices')
	parser_rebuild.add_argument('-V', '--verbose', action='store_true', help="also show info messages and debugging info")
	parser_rebuild.add_argument('-s', '--source', help="the source configuration file to use")
	parser_rebuild.add_argument('-t', '--targets', nargs='+', help="list of ids of targets that should be used for the conversion, default: all")
	parser_rebuild.add_argument('item', nargs='*', help="list of ids of items or indices that should be updated")
	parser_rebuild.set_defaults(func=rebuild_command)

	#check command parser
	parser_check = subparsers.add_parser('check', help='checks the integrity of the source')
	parser_check.add_argument('-V', '--verbose', action='store_true', help="also show info messages and debugging info")
	parser_check.add_argument('-s', '--source', help="the source configuration file to use")
	parser_check.add_argument('-y', '--severity', nargs=1, default=['0'], choices=[str(x) for x in range(0,11)], help="only show issues with minimum severity level (0..10)")
	parser_check.add_argument('item', nargs='*', help="list of ids of items or indices that should be checked")
	parser_check.set_defaults(func=check_command)

	#items command parser
	parser_list = subparsers.add_parser('list', help='lists available item ids')
	parser_list.add_argument('-V', '--verbose', action='store_true', help="also show info messages and debugging info")
	parser_list.add_argument('-s', '--source', help="the source configuration file to use")
	parser_list.add_argument('item', nargs='*', help="list of ids of items or indices that should be included")
	parser_list.set_defaults(func=list_command)

	args = parser.parse_args()
	args.func(args)

	#to receive sigint, continue program until all threads stopped
	while threading.active_count() > 1:
		try:
			# Join all threads to receive sigint
			[t.join(1) for t in threading.enumerate() if t.isAlive() and not(t == threading.current_thread() or t == Logger.get_thread())]
			
			#if only the main and logging threads are running stop program
			working = False
			for t in threading.enumerate():
				if t.isAlive() and not(t == threading.current_thread() or t == Logger.get_thread()):
					working = True

			if not working:
				shutdown()
				break

			#print("Running threads:")
			#for t in threading.enumerate():
			#	if not(t == threading.current_thread()) and t.isAlive():
			#		print("* " + t.name)
		except KeyboardInterrupt:
			signal_handler(signal.SIGINT, None)
Example No. 22
def main():
    if (not args.no_upgrade) and is_virtualenv():
        update(git=False)

    if os.path.dirname(sys.argv[0]):
        os.chdir(os.path.dirname(sys.argv[0]))

    from system.logging.logger import getLogger
    from system import constants
    from system.decorators import threads

    sys.stdout = getwriter('utf-8')(sys.stdout)
    sys.stderr = getwriter('utf-8')(sys.stderr)

    ultros = Ultros(args)
    versions = VersionManager()

    if not os.path.exists("logs"):
        os.mkdir("logs")

    logger = getLogger("System")

    requests_log = logging.getLogger("requests")
    requests_log.setLevel(logging.WARNING)

    logger.info(_("Starting up, version \"%s\"") % constants.__version__)
    logger.info(constants.__version_info__)

    # Write PID to file
    fh = open("ultros.pid", "w")
    fh.write(str(os.getpid()))
    fh.flush()
    fh.close()

    logger.info(_("PID: %s") % os.getpid())

    try:
        logger.debug("Starting..")
        ultros.start()

    except Exception:
        logger.critical(_("Runtime error - process cannot continue!"))
        logger.exception("")
    except SystemExit as e:
        logger.trace("SystemExit caught!")

        logger.debug("Stopping threadpool..")
        threads.pool.stop()

        logger.debug("Removing pidfile..")
        os.remove("ultros.pid")
        exit(e.code)
    finally:
        try:
            logger.debug("Unloading manager..")
            ultros.stop()

            logger.debug("Stopping threadpool..")
            threads.pool.stop()

            logger.debug("Removing pidfile..")
            os.remove("ultros.pid")

            if args.catch:
                raw_input(_("Press enter to exit."))
        except Exception:
            pass
Example No. 23
import sys
from kitchen.text.converters import getwriter, to_bytes
import os
from requests_oauthlib import OAuth1
import requests
import simplejson as json
from utils import flatten_json 
import pandas as pd

# Sets system out to print unicode
UTF8Writer = getwriter('utf8')
sys.stdout = UTF8Writer(sys.stdout)


def get_n_tweets(n_tweets,track):
  """Returns 100 of the most recent tweets from Twitter's API."""
  auth = get_twitter_auth()
  return get_twitter_data(auth,n_tweets,track)

def get_twitter_auth():
  '''Returns a Twitter auth object'''
  # Reads in all API keys from environment
  try:
    consumer_key = os.environ['TWITTER_CONSUMER_KEY']
    consumer_secret = os.environ['TWITTER_CONSUMER_SECRET_KEY']
    access_token = os.environ['TWITTER_ACCESS_KEY']
    access_token_secret = os.environ['TWITTER_ACCESS_SECRET_KEY']
  except KeyError:
    print("Please set twitter authentication environment variables.")
    raise