Example #1
def _init_console_encoding():
    # If the console encoding is auto-detected as utf-8, cp949 or euc-kr, keep it as is; otherwise standardize on utf-8.
    encoding = sys.stdout.encoding or ''
    if encoding.replace('-', '').lower() not in ['utf8', 'cp949', 'euckr']:
        sys.stdout = codecs.getwriter('utf-8')(sys.stdout.detach())
        sys.stderr = codecs.getwriter('utf-8')(sys.stderr.detach())
        print('Warn! Current console encoding {} is not supported. Now, using UTF-8 encoding.'.format(encoding))
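On Python 3.7 and later the same normalization can be done without swapping in codecs writers, because text streams expose reconfigure(). A minimal sketch under the same assumptions as the function above (accept utf-8, cp949 and euc-kr, fall back to UTF-8 otherwise):

import sys

def _init_console_encoding_py37():
    # Sketch only: keep a detected utf-8/cp949/euc-kr console as is, otherwise switch to UTF-8.
    encoding = (sys.stdout.encoding or '').replace('-', '').lower()
    if encoding not in ('utf8', 'cp949', 'euckr'):
        sys.stdout.reconfigure(encoding='utf-8')
        sys.stderr.reconfigure(encoding='utf-8')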
Example #2
def unicode_output_stream(stream):
    """Get wrapper for given stream that writes any unicode without exception

    Characters that can't be coerced to the encoding of the stream, or 'ascii'
    if valid encoding is not found, will be replaced. The original stream may
    be returned in situations where a wrapper is determined unneeded.

    The wrapper only allows unicode to be written, not non-ascii bytestrings,
    which is a good thing to ensure sanity and sanitation.
    """
    if sys.platform == "cli":
        # Best to never encode before writing in IronPython
        return stream
    try:
        writer = codecs.getwriter(stream.encoding or "")
    except (AttributeError, LookupError):
        # GZ 2010-06-16: Python 3 StringIO ends up here, but probably needs
        #                different handling as it doesn't want bytestrings
        return codecs.getwriter("ascii")(stream, "replace")
    if writer.__module__.rsplit(".", 1)[1].startswith("utf"):
        # The current stream has a unicode encoding so no error handler is needed
        if sys.version_info > (3, 0):
            return stream
        return writer(stream)
    if sys.version_info > (3, 0):
        # Python 3 doesn't seem to make this easy, handle a common case
        try:
            return stream.__class__(stream.buffer, stream.encoding, "replace",
                stream.newlines, stream.line_buffering)
        except AttributeError:
            pass
    return writer(stream, "replace")    
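A minimal usage sketch for the helper above; the stream and the sample text are illustrative only:

import sys

out = unicode_output_stream(sys.stdout)
out.write(u'caf\u00e9 \u2013 sample text\n')  # unencodable characters are replaced instead of raising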
Example #3
    def parseArgs(self):
        global opts
        encoding = locale.getpreferredencoding()
        sys.stdout = codecs.getwriter(encoding)(sys.stdout, errors = "replace")
        sys.stderr = codecs.getwriter(encoding)(sys.stderr, errors = "replace")

        parser = utils.MyParser(version=__version__, usage="%prog [seatmlfile]",
                                description=__doc__)

        #utils.addmanageropts(parser)
        #parser.add_option('-g', '--gui', dest='guimode', action="store_true",
        #                  default=False,
        #                  help='show file open dialog in GUI')

        parser.add_option('-f', '--config-file', dest='config_file', type="string",
                          help='apply configuration file')

        parser.add_option('-n', '--name', dest='naming_format', type="string",
                          help='set naming format' )

        try:
            opts, args = parser.parse_args()
        except optparse.OptionError, e:
            print >>sys.stderr, 'OptionError:', e
            sys.exit(1)
Example #4
 def __call__(self):
     stdout_sio = codecs.getwriter(_encoding)(StringIO.StringIO())
     stderr_sio = codecs.getwriter(_encoding)(StringIO.StringIO())
     process = None
     exception_queue = squeue.Queue()
     print(repr(self.command))
     process = subprocess.Popen(shlex.split(self.command.encode(_encoding)),bufsize=1,stdin=None,stdout=subprocess.PIPE,stderr=subprocess.PIPE,shell=False)
     try:
         stdout_reader = StreamReader([forward(stdout_sio),self.stdout_callback],process.stdout,exception_queue)
         stderr_reader = StreamReader([forward(stderr_sio),self.stderr_callback],process.stderr,exception_queue)
         stdout_reader.start();
         stderr_reader.start();
         while process.poll() is None:
             try:
                 exception = exception_queue.get(timeout=0.1)
                 print(repr(exception))
             except squeue.Empty:
                 pass
             else:
                 raise exception
             self.periodic_notifier()
     except:
         process.terminate();
         raise
     return process.returncode,stdout_sio.getvalue(),stderr_sio.getvalue()
Example #5
 def __init__(self):
     super(Console, self).__init__()
     self._saved_ocp = ctypes.windll.kernel32.GetConsoleOutputCP()
     self._saved_icp = ctypes.windll.kernel32.GetConsoleCP()
     ctypes.windll.kernel32.SetConsoleOutputCP(65001)
     ctypes.windll.kernel32.SetConsoleCP(65001)
     # ANSI handling available through SetConsoleMode since Windows 10 v1511 
     # https://en.wikipedia.org/wiki/ANSI_escape_code#cite_note-win10th2-1
     # if platform.release() == '10' and int(platform.version().split('.')[2]) > 10586:
     ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004
     import ctypes.wintypes as wintypes
     if not hasattr(wintypes, 'LPDWORD'): # PY2
         wintypes.LPDWORD = ctypes.POINTER(wintypes.DWORD)
     SetConsoleMode = ctypes.windll.kernel32.SetConsoleMode
     GetConsoleMode = ctypes.windll.kernel32.GetConsoleMode
     GetStdHandle = ctypes.windll.kernel32.GetStdHandle
     mode = wintypes.DWORD()
     GetConsoleMode(GetStdHandle(-11), ctypes.byref(mode))
     if (mode.value & ENABLE_VIRTUAL_TERMINAL_PROCESSING) == 0:
         SetConsoleMode(GetStdHandle(-11), mode.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING)
         self._saved_cm = mode
     self.output = codecs.getwriter('UTF-8')(Out(sys.stdout.fileno()), 'replace')
     # the change of the code page is not propagated to Python, manually fix it
     sys.stderr = codecs.getwriter('UTF-8')(Out(sys.stderr.fileno()), 'replace')
     sys.stdout = self.output
     self.output.encoding = 'UTF-8'  # needed for input
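A possible teardown counterpart for the class above, restoring the console state captured in __init__; the method name restore() and its exact behavior are assumptions, not part of the original code:

 def restore(self):
     # Hypothetical cleanup: put back the saved code pages and, if we changed it, the console mode.
     if hasattr(self, '_saved_cm'):
         ctypes.windll.kernel32.SetConsoleMode(
             ctypes.windll.kernel32.GetStdHandle(-11), self._saved_cm.value)
     ctypes.windll.kernel32.SetConsoleOutputCP(self._saved_ocp)
     ctypes.windll.kernel32.SetConsoleCP(self._saved_icp)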
Example #6
def _write_with_fallback(s, write, fileobj):
    """Write the supplied string with the given write function like
    ``write(s)``, but use a writer for the locale's preferred encoding in case
    of a UnicodeEncodeError.  Failing that, attempt to write with 'utf-8' or
    'latin-1'.
    """
    try:
        write(s)
        return write
    except UnicodeEncodeError:
        # Let's try the next approach...
        pass

    enc = locale.getpreferredencoding()
    try:
        Writer = codecs.getwriter(enc)
    except LookupError:
        Writer = codecs.getwriter(_DEFAULT_ENCODING)

    f = Writer(fileobj)
    write = f.write

    try:
        write(s)
        return write
    except UnicodeEncodeError:
        Writer = codecs.getwriter('latin-1')
        f = Writer(fileobj)
        write = f.write

    # If this doesn't work let the exception bubble up; I'm out of ideas
    write(s)
    return write
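A minimal usage sketch for _write_with_fallback: keep the returned write function so the encoding fallback only has to be negotiated once per stream. The stream choice and sample strings are illustrative:

import sys

write = sys.stdout.write
for chunk in (u'plain ascii\n', u'non-ascii: \u00e9 \u2019\n'):
    write = _write_with_fallback(chunk, write, sys.stdout)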
Example #7
def set_encoding():
    #
    # XXX UTF-8 won't encode to latin-1/ISO8859-1:
    #   UnicodeEncodeError: 'latin-1' codec can't encode character '\u2019'
    #
    # do PYTHONIOENCODING=utf8 equivalent
    #
    encoding = 'utf-8'

    if hasattr(sys.stdin, 'detach'):
        # >= 3.1
        import io

        for s in ('stdin', 'stdout', 'stderr'):
            line_buffering = getattr(sys, s).line_buffering
#            print(s, line_buffering, file=sys.stderr)
            setattr(sys, s, io.TextIOWrapper(getattr(sys, s).detach(),
                                             encoding=encoding,
                                             line_buffering=line_buffering))

    else:
        import codecs

        sys.stdin = codecs.getreader(encoding)(sys.stdin)
        sys.stdout = codecs.getwriter(encoding)(sys.stdout)
        sys.stderr = codecs.getwriter(encoding)(sys.stderr)
Example #8
def main():
    # log settings
    # log format
    #logging.basicConfig(level=logging.DEBUG, format='%(asctime)s [%(module)10s] [%(levelname)5s] %(message)s')

    sys.stdout = codecs.getwriter('utf8')(sys.stdout)
    sys.stderr = codecs.getwriter('utf8')(sys.stderr)

    config = init_config()
    if not config:
        return

    logger.log('[x] Sekolah Pokemon v2.0 by Bhagas', 'green')
    logger.log('[x] Running configuration', 'yellow')

    try:
        bot = PokemonGoBot(config)
        bot.start()

        logger.log('[x] Running PokemonGo Bot....', 'green')

        while (True):
            bot.take_step()

    except KeyboardInterrupt:
        logger.log('[x] Exiting PokemonGo Bot', 'red')
Example #9
File: tff.py Project: lurdan/tff
    def __init__(self,
                 output,
                 termenc='UTF-8',
                 scanner=DefaultScanner(),
                 handler=DefaultHandler(),
                 buffering=False):
        self.__termenc = termenc
        self.__scanner = scanner
        self.__handler = handler
        self._c1 = 0

        if buffering:
            try:
                from cStringIO import StringIO
                self._output = codecs.getwriter(termenc)(StringIO())
            except ImportError:
                try:
                    from StringIO import StringIO
                    self._output = codecs.getwriter(termenc)(StringIO())
                except ImportError:
                    from io import StringIO
                    self._output = codecs.getwriter(termenc)(StringIO())
        else:
            self._output = codecs.getwriter(termenc)(output)
        self._target_output = output
        self._buffering = buffering
Example #10
def main():

    logging.basicConfig(level=logging.INFO)
    logging.getLogger('suds.client').setLevel(logging.FATAL)

    sys.stdout = codecs.getwriter('utf-8')(sys.stdout)
    sys.stderr = codecs.getwriter('utf-8')(sys.stderr)

    parser = OptionParser()

    parser.add_option("",
                      "--target_dir",
                      dest="target_dir",
                      help="HTML report target directory. The script will place the cnaf ggus report in the given directory.")


    (options, args) = parser.parse_args()

    print "cnaf-mon v. %s. " % __version__

    if options.target_dir is None:
        print >> sys.stderr, "Please set the --target_dir option!"
        sys.exit(2)

    check_ticket_status(report_dir=options.target_dir)
Example #11
def process_args(args):
    if not (args.ml or args.rb):
        args.rb = True
    if args.infile:
        ifp = io.open(args.infile, encoding='utf-8')
    else:
        if sys.version_info[0] >= 3:
            ifp = codecs.getreader('utf8')(sys.stdin.buffer)
        else:
            ifp = codecs.getreader('utf8')(sys.stdin)

    if args.outfile:
        ofp = io.open(args.outfile, mode='w', encoding='utf-8')
    else:
        if sys.version_info[0] >= 3:
            ofp = codecs.getwriter('utf8')(sys.stdout.buffer)
        else:
            ofp = codecs.getwriter('utf8')(sys.stdout)

    # initialize transliterator object
    trn = Transliterator(args.source,
                         args.target,
                         rb=args.rb,
                         build_lookup=args.build_lookup)

    # transliterate text
    for line in ifp:
        tline = trn.convert(line)
        ofp.write(tline)

    # close files
    ifp.close()
    ofp.close()
Example #12
def checkFiles(recurse, wrap, paths):
    # This method checks the input argument path(s) for existing files and
    # analyses them

    # Find existing files in the given input path(s)
    findFiles(recurse, paths)

    # If there are no valid input files then exit program
    checkNoInput(existingFiles)

    # Set encoding of the terminal to UTF-8
    if config.PYTHON_VERSION.startswith(config.PYTHON_2):
        out = codecs.getwriter(config.UTF8_ENCODING)(sys.stdout)
    elif config.PYTHON_VERSION.startswith(config.PYTHON_3):
        out = codecs.getwriter(config.UTF8_ENCODING)(sys.stdout.buffer)

    # Wrap the xml output in <results> element, if wrapper flag is true
    if wrap:
        out.write("<?xml version='1.0' encoding='UTF-8'?>\n<results>\n")
    else:
        out.write("<?xml version='1.0' encoding='UTF-8'?>\n")

    # Process the input files
    for path in existingFiles:

        # Analyse file
        xmlElement = checkOneFile(path)
        
        # Write output to stdout
        writeElement(xmlElement,out)
        
    # Close </results> element if wrapper flag is true
    if wrap:
        out.write("</results>\n")
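The Python 2/3 branch above is a recurring pattern in these examples: Python 2 wraps the text-mode sys.stdout directly, while Python 3 has to wrap the underlying binary buffer. A small standalone sketch of the same idea, without the config constants used here:

import codecs
import sys

def utf8_stdout_writer():
    # Wrap the binary buffer on Python 3, the stream itself on Python 2.
    raw = getattr(sys.stdout, 'buffer', sys.stdout)
    return codecs.getwriter('utf-8')(raw)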
Example #13
def main():
  sys.stdin = codecs.getreader('utf-8')(sys.stdin)
  sys.stdout = codecs.getwriter('utf-8')(sys.stdout)
  sys.stderr = codecs.getwriter('utf-8')(sys.stderr)
  logging.basicConfig(level = logging.INFO)

  print '// Automatically generated by mozc'
  print '#ifndef MOZC_SESSION_QUALITY_MAIN_DATA_H_'
  print '#define MOZC_SESSION_QUALITY_MAIN_DATA_H_'
  print ''
  print 'namespace mozc {'
  print 'struct TestCase {'
  print '  const char* source;'
  print '  const char* expected_result;'
  print '  const char* hiragana_sentence;'
  print '};'
  print ''
  print 'static TestCase test_cases[] = {'

  for filename in sys.argv[1:]:
    convert_tsv(filename)

  print '  {NULL, NULL, NULL}'
  print '};'
  print '}  // namespace mozc'
  print '#endif  // MOZC_SESSION_QUALITY_MAIN_DATA_H_'
Example #14
    def cmd_expect(self, command, **kwargs):
        from pexpect import spawnu

        # prepare the environ, based on the system + our own env
        env = copy(environ)
        env.update(self.environ)

        # prepare the process
        kwargs.setdefault('env', env)
        kwargs.setdefault('show_output', self.log_level > 1)
        sensible = kwargs.pop('sensible', False)
        show_output = kwargs.pop('show_output')

        if show_output:
            if IS_PY3:
                kwargs['logfile'] = codecs.getwriter('utf8')(stdout.buffer)
            else:
                kwargs['logfile'] = codecs.getwriter('utf8')(stdout)

        if not sensible:
            self.debug('Run (expect) {0!r}'.format(command))
        else:
            self.debug('Run (expect) {0!r} ...'.format(command.split()[0]))

        self.debug('Cwd {}'.format(kwargs.get('cwd')))
        return spawnu(command, **kwargs)
Example #15
    def run(self, argv):
        """Runs mach with arguments provided from the command line."""

        # If no encoding is defined, we default to UTF-8 because without this
        # Python 2.7 will assume the default encoding of ASCII. This will blow
        # up with UnicodeEncodeError as soon as it encounters a non-ASCII
        # character in a unicode instance. We simply install a wrapper around
        # the streams and restore once we have finished.
        orig_stdin = sys.stdin
        orig_stdout = sys.stdout
        orig_stderr = sys.stderr

        try:
            if sys.stdin.encoding is None:
                sys.stdin = codecs.getreader('utf-8')(sys.stdin)

            if sys.stdout.encoding is None:
                sys.stdout = codecs.getwriter('utf-8')(sys.stdout)

            if sys.stderr.encoding is None:
                sys.stderr = codecs.getwriter('utf-8')(sys.stderr)

            self._run(argv)
        finally:
            sys.stdin = orig_stdin
            sys.stdout = orig_stdout
            sys.stderr = orig_stderr
Example #16
def setup_console(sys_enc="utf-8"):
    """
    Set sys.defaultencoding to `sys_enc` and update the stdout/stderr writers to the corresponding encoding.
    For Win32, the OEM console encoding will be used instead of `sys_enc`.
    """
    reload(sys)
    try:
        if sys.platform.startswith("win"):
            import ctypes
            enc = "cp%d" % ctypes.windll.kernel32.GetOEMCP()
            #TODO: win64/python64 implementation
        else:
            enc = (sys.stdout.encoding if sys.stdout.isatty() else
                    sys.stderr.encoding if sys.stderr.isatty() else
                    sys.getfilesystemencoding() or sys_enc)

        sys.setdefaultencoding(sys_enc)

        # redefine stdout/stderr in console
        if sys.stdout.isatty() and sys.stdout.encoding != enc:
            sys.stdout = codecs.getwriter(enc)(sys.stdout, 'replace')

        if sys.stderr.isatty() and sys.stderr.encoding != enc:
            sys.stderr = codecs.getwriter(enc)(sys.stderr, 'replace')

    except:
        pass
    print(sys.getdefaultencoding())
    print(locale.getdefaultlocale())
Example #17
def main():
    # log settings
    # log format
    #logging.basicConfig(level=logging.DEBUG, format='%(asctime)s [%(module)10s] [%(levelname)5s] %(message)s')

    sys.stdout = codecs.getwriter('utf8')(sys.stdout)
    sys.stderr = codecs.getwriter('utf8')(sys.stderr)

    config = init_config()
    if not config:
        return

    logger.log('[x] PokemonGO Bot v1.0', 'green')
    logger.log('[x] Configuration initialized', 'yellow')

    try:
        bot = PokemonGoBot(config)
        bot.start()

        logger.log('[x] Starting PokemonGo Bot....', 'green')

        while True:
            bot.take_step()

    except KeyboardInterrupt:
        logger.log('[x] Exiting PokemonGo Bot', 'red')
Example #18
    def __init__(self, **kwargs):
        if sys.version_info > (3, 0):
            if sys.stdout.encoding != 'UTF-8':
                sys.stdout = codecs.getwriter('utf-8')(sys.stdout.buffer, 'strict')
            if sys.stderr.encoding != 'UTF-8':
                sys.stderr = codecs.getwriter('utf-8')(sys.stderr.buffer, 'strict')
        else:
            if sys.stdout.encoding != 'UTF-8':
                sys.stdout = codecs.getwriter('utf-8')(sys.stdout, 'strict')
            if sys.stderr.encoding != 'UTF-8':
                sys.stderr = codecs.getwriter('utf-8')(sys.stderr, 'strict')
        self.aws_region = kwargs.get('aws_region')
        self.aws_access_key_id = kwargs.get('aws_access_key_id')
        self.aws_secret_access_key = kwargs.get('aws_secret_access_key')
        self.aws_session_token = kwargs.get('aws_session_token')
        self.log_group_name = kwargs.get('log_group_name')
        self.log_stream_name = kwargs.get('log_stream_name')
        self.filter_pattern = kwargs.get('filter_pattern')
        self.watch = kwargs.get('watch')
        self.color_enabled = kwargs.get('color_enabled')
        self.output_stream_enabled = kwargs.get('output_stream_enabled')
        self.output_group_enabled = kwargs.get('output_group_enabled')
        self.output_timestamp_enabled = kwargs.get('output_timestamp_enabled')
        self.output_ingestion_time_enabled = kwargs.get(
            'output_ingestion_time_enabled')
        self.start = self.parse_datetime(kwargs.get('start'))
        self.end = self.parse_datetime(kwargs.get('end'))

        self.client = boto3.client(
            'logs',
            aws_access_key_id=self.aws_access_key_id,
            aws_secret_access_key=self.aws_secret_access_key,
            aws_session_token=self.aws_session_token,
            region_name=self.aws_region
        )
Example #19
 def wrap_fp(fp):
     if suffix == ".gz":
         fp = GzipFile(fileobj=fp, mode=mode)
     elif suffix == ".bz2":
         try:
             fp = BZ2File(fp, mode=mode)
         except TypeError:
             if sys.version_info < (3, 0, 0):
                 raise NotImplementedError("built-in BZ2File is partially broken in python 2, install bz2file from pypi or use a compression setting other than 'bz2'")
             else:
                 raise
     elif suffix == ".xz":
         fp = LZMAFile(fp, mode=mode)
     if (suffix or sys.version_info < (3,)) and "b" not in mode:
         # If mode is not binary (and we expect to be able to
         # write() str values, not bytes), we need to create
         # an additional encoding wrapper. That encoder can
         # probably use UTF-8 without any need for additional
         # configuration
         if "r" in mode and "w" in mode:
             fp = StreamReaderWriter(fp, codecs.getreader("utf-8"),
                                     codecs.getwriter("utf-8"))
         elif "w" in mode:
             fp = codecs.getwriter("utf-8")(fp)
         elif suffix:
             fp = codecs.getreader("utf-8")(fp)
     fp.realname = filename
     return fp
Example #20
def main():
    logging.basicConfig(level=logging.INFO)

    sys.stdout = codecs.getwriter('utf-8')(os.fdopen(sys.stdout.fileno(), 'w', 0))
    sys.stderr = codecs.getwriter('utf-8')(os.fdopen(sys.stderr.fileno(), 'w', 0))

    judgeenv.load_env(cli=True, testsuite=True)
    judgeenv.env['id'] = 'testsuite'
    executors.load_executors()

    executor_fail = not all(name in executors.executors for name in required_executors)
    if executor_fail:
        print(ansi_style('#ansi[A required executor failed to load.](red|bold)'))
    else:
        print(ansi_style('#ansi[All required executors loaded successfully.](green|bold)'))
    print()

    tester = Tester(judgeenv.problem_regex, judgeenv.case_regex)
    fails = tester.test_all()
    print()
    print('Test complete')
    if fails:
        print(ansi_style('#ansi[A total of %d case(s) failed](red|bold).') % fails)
    else:
        print(ansi_style('#ansi[All cases passed.](green|bold)'))
    raise SystemExit(int(executor_fail or fails != 0))
Example #21
def setup_utf8():
	""" We need to set the system encoding and stdout/stderr to utf-8.
		I already know that this is officially unsupported at least on
		Python 2.x installations, but many strings in Licorn® contain UTF-8
		characters. Our terminal emulators are all natively UTF-8,
		and outputting UTF-8 to log files never hurt anyone. The distros we
		support are all UTF-8 enabled at the lower level.

		If for some reason you would want another encoding, just define the
		PYTHONIOENCODING environment variable. This will probably hurt though,
		because many things in Licorn® assume a modern UTF-8 underlying OS.

		Some discussions:

		- http://drj11.wordpress.com/2007/05/14/python-how-is-sysstdoutencoding-chosen/
		- http://www.haypocalc.com/wiki/Python_Unicode (in french)
		- http://stackoverflow.com/questions/1473577/writing-unicode-strings-via-sys-stdout-in-python
		- http://stackoverflow.com/questions/492483/setting-the-correct-encoding-when-piping-stdout-in-python
		- http://stackoverflow.com/questions/4374455/how-to-set-sys-stdout-encoding-in-python-3
	"""

	# WARNING: 'UTF-8' is OK, 'utf-8' is not. It borks the ipython
	# shell prompt and readline() doesn't work anymore.
	default_encoding = os.getenv('PYTHONIOENCODING', 'UTF-8')

	if sys.getdefaultencoding() != default_encoding:
		reload(sys)
		sys.setdefaultencoding(default_encoding)

	if sys.stdout.encoding != default_encoding:
		sys.stdout = codecs.getwriter(default_encoding)(sys.stdout)

	if sys.stderr.encoding != default_encoding:
		sys.stderr = codecs.getwriter(default_encoding)(sys.stderr)
Example #22
def setup_console(sys_enc="utf-8"):
    imp.reload(sys)
    try:
        # for win32, call the system library function
        if sys.platform.startswith("win"):
            import ctypes
            enc = "cp%d" % ctypes.windll.kernel32.GetOEMCP()  # TODO: check on win64/python64
        else:
            # for Linux, everything seems to already be in place
            enc = (sys.stdout.encoding if sys.stdout.isatty() else
                        sys.stderr.encoding if sys.stderr.isatty() else
                            sys.getfilesystemencoding() or sys_enc)

        # encoding for sys
        sys.setdefaultencoding(sys_enc)

        # redefine the standard output streams if they are not redirected
        if sys.stdout.isatty() and sys.stdout.encoding != enc:
            sys.stdout = codecs.getwriter(enc)(sys.stdout, 'replace')

        if sys.stderr.isatty() and sys.stderr.encoding != enc:
            sys.stderr = codecs.getwriter(enc)(sys.stderr, 'replace')

    except:
        pass  # Error? Whatever it is - keep working the old way...
Example #23
    def __init__(self, parent= None,
                       description="A command line tool",
                       **kwargs ):

        self.nErrors= 0
        self.parent= parent
        
        self.config= self.defaultConfiguration()
        
        if self.parent:
            self.importConfiguration(parent.config)
        
        #Encoding is not set e.g. when piping. Maybe move this to Logger ?
        if self.config.outStream.encoding is None:
            self.config.outStream= getwriter("utf8")(self.config.outStream)

        if self.config.logStream.encoding is None:
            self.config.logStream= getwriter("utf8")(self.config.logStream)

        self.parser= argparse.ArgumentParser( add_help= False,
                                              description=description,
                                              **kwargs )
        
        self.parser.add_argument("-h", "--help",
                                 action= 'store_true')
Example #24
def xopen(filename, mode='r'):
    """
    Replacement for the "open" function that can also open
    files that have been compressed with gzip. If the filename ends with .gz,
    the file is opened with gzip.open(). If it doesn't, the regular open()
    is used. If the filename is '-', standard output (mode 'w') or input
    (mode 'r') is returned.
    """
    assert isinstance(filename, str)
    if filename == '-':
        return sys.stdin if 'r' in mode else sys.stdout
    if filename.endswith('.gz'):
        if sys.version_info[0] < 3:
            if 'r' in mode:
                return buffered_reader(gzip.open(filename, mode))
            else:
                return gzip.open(filename, mode)
        else:
            if 'r' in mode:
                return getreader('ascii')(gzip.open(filename, mode))
            else:
                return getwriter('ascii')(gzip.open(filename, mode))
    elif filename.endswith('.bz2'):
        if sys.version_info[0] < 3:
            return bz2.BZ2File(filename, mode)
        else:
            if 'r' in mode:
                return getreader('ascii')(bz2.BZ2File(filename, mode))
            else:
                return getwriter('ascii')(bz2.BZ2File(filename, mode))
    else:
        return open(filename, mode)
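A hypothetical usage of xopen, streaming a gzip-compressed file to standard output; the file name is illustrative only:

out = xopen('-', 'w')                  # '-' maps to sys.stdout
with xopen('reads.txt.gz') as infile:  # decompressed transparently because of the .gz suffix
    for line in infile:
        out.write(line)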
Example #25
    def __init__(self, action_list=None, pipeline=None, stdout=None, stderr=None, recovery=None):
        self.action_list = action_list or []
        self.pipeline = pipeline
        if pipeline is None:
            self.pipeline = json.loads(os.environ.get('PIPELINE', 'null'))
            if self.pipeline is None:
                self.pipeline = []

        if stdout is None:
            stdout = codecs.getwriter('utf-8')(sys.stdout)

        if stderr is None:
            stderr = codecs.getwriter('utf-8')(sys.stderr)

        self.stdout = stdout
        self.stderr = stderr
        exec_pypeline.Pipeline.__init__(
            self, action_list or self.action_list,
            before_action=self.before_action,
            after_action=self.after_action,
            before_forward=self.before_forward,
            before_backward=self.before_backward,
            recovery=recovery
        )
        store.Store.__init__(self, self.stdout, self.stderr)

        self._init_actions()
        self.notify_actions()
        self._failed_action = None
        self._failed_err = None
Example #26
def resetOutputEncoding():
    import sys
    import codecs
    if sys.stdout.encoding != 'cp850':
        sys.stdout = codecs.getwriter('cp850')(sys.stdout, 'strict')
    if sys.stderr.encoding != 'cp850':
        sys.stderr = codecs.getwriter('cp850')(sys.stderr, 'strict')
Example #27
 def main(self, *arguments):
     # Decode arguments.
     program = None
     import getopt
     options, arguments = getopt.getopt(arguments, 'de:hl:u')
     for option, value in options:
         if option == '-d':
             program = database.dump_database,
         elif option == '-e':
             program = database.erase_entries, value
         elif option == '-h':
             sys.stdout.write(__doc__)
             return
         elif option == '-l':
             program = database.load_entries, value
         elif option == '-u':
             program = database.undump_database,
     if program is None:
         if arguments:
             program = self.start_bot, arguments[0]
             arguments = arguments[1:]
         else:
             program = self.start_bot, os.environ['IRCSERVER']
     assert not arguments, arguments
     sys.stdout = codecs.getwriter(file_encoding)(
         sys.stdout, 'backslashreplace')
     sys.stderr = codecs.getwriter(file_encoding)(
         sys.stderr, 'backslashreplace')
     program[0](*program[1:])
Example #28
def main():
    global dictionary
    global errwriter
    global filter_gramms
    global corpusHandler

    if "-utf" in sys.argv:
      errwriter = codecs.getwriter("utf-8")(sys.stderr, "xmlcharrefreplace")
    else:
      errwriter = codecs.getwriter("windows-1251")(sys.stderr, "xmlcharrefreplace")

    global WITHMONTHS
    WITHMONTHS = '-withmonths' in sys.argv

    filter_gramms = "-f" in sys.argv

    global replace_colon
    replace_colon = "--colon" in sys.argv

    corpusHandler = CorpusHandler()
    # assuming that if the 4th argument doesn't start with '-', it is not some
    # arbitrary option but the corpus name
    if len(sys.argv) > 4 and not sys.argv[4].startswith('-'):
        global CORPUS_NAME
        CORPUS_NAME = sys.argv[4]

    corpusHandler.load(sys.argv[1], "-utf" in sys.argv)

    convert_directory(os.path.abspath(sys.argv[2]), os.path.abspath(sys.argv[3]), os.path.abspath(sys.argv[2]))
    errwriter.close()
Example #29
def main(argv):

    # Unicode I/O
    sys.stdin = codecs.getreader('UTF-8')(sys.stdin)
    sys.stdout = codecs.getwriter('UTF-8')(sys.stdout)
    sys.stderr = codecs.getwriter('UTF-8')(sys.stderr)

    def usage():
        print >> sys.stderr, u'Create translation task.  All input files should use UTF-8 encoding.'
        print >> sys.stderr, u''
        print >> sys.stderr, u'Realtime post-editing task with feedback:'
        print >> sys.stderr, u'{0} realtime <source> <realtime.d> <task_name> <task_dir.data>'.format(argv[0])
        print >> sys.stderr, u''
        print >> sys.stderr, u'Realtime post-editing task (no adaptation from feedback):'
        print >> sys.stderr, u'{0} realtime-static <source> <realtime.d> <task_name> <task_dir.data>'.format(argv[0])
        print >> sys.stderr, u''
        print >> sys.stderr, u'Offline task (translations pre-generated):'
        print >> sys.stderr, u'{0} offline <source> <target> <task_name> <task_dir.data>'.format(argv[0])
        print >> sys.stderr, u''
 
 
        print >> sys.stderr, u'Place output task directory in data/tasks'
        sys.exit(2)

    task = argv[1] if len(argv) > 1 else ''
    if task == track.REALTIME and len(argv[2:]) == 4:
        new_realtime(*argv[2:], learn=True)
    elif task == track.REALTIME_STATIC and len(argv[2:]) == 4:
        new_realtime(*argv[2:], learn=False)
    elif task == track.OFFLINE and len(argv[2:]) == 4:
        new_offline(*argv[2:])
    else:
        usage()
Example #30
def main():
	sys.stdout = codecs.getwriter('utf8')(sys.stdout)
	sys.stderr = codecs.getwriter('utf8')(sys.stderr)

	forum_db = ForumDB();
	forum_db.connect_to_db();

	print "*"*80
	print "testing forum db input ..."	
	print "*"*80
	url = "http://www.heise.de/newsticker/foren/S-Apple-Patent-fuer-Notabschaltung-von-iPhone-Co/forum-184599/list/"
	print "storing %s ..." % (url)
	forum_db.store_forum_url(url)
	print "#"*80
	print ""
	
	print "*"*80
	print "testing forum db output ..."	
	print "*"*80

	max_forums_to_spider = 10;
	counter = 1
	while 1:
		if counter > max_forums_to_spider:
			break;
		print "\tcounter: %i/%i ..." % (counter, max_forums_to_spider);
		url = forum_db.get_random_forum_url()
		if not url:
			print "\t\tgot no url ..."
			break
		print "\t\turl: " + url
		counter += 1
	forum_db.disconnect_from_db()
	print "#"*80
	print ""
Example #31
#!/usr/bin/env python3.5
# Add gps coordinates to website
# Usage: add.py?lat=-324.2343&lng=133.324324
import sys, codecs, cgi, cgitb, sqlite3, datetime
web_cgi = codecs.getwriter('utf-8')(sys.stdout.buffer)

HEADER = 'Content-Type:text/html;charset=utf-8'
HEADER_ERR = 'Status: 400 Bad Request\n\nUsage: add.py?lat=-324.2343&lng=133.324324'


def insert(lat, lng):
    timestamp = int(datetime.datetime.utcnow().timestamp())
    try:
        conn = sqlite3.connect('activity.sqlite3')
        c = conn.cursor()
        c.execute(
            'CREATE TABLE IF NOT EXISTS log (_id INTEGER PRIMARY KEY, lat REAL, lng REAL, timestamp INTEGER)'
        )
        conn.commit()
        c.execute('INSERT INTO log (lat, lng, timestamp) VALUES (?, ?, ?)',
                  (lat, lng, timestamp))
        conn.commit()
        print("\n\nAdded.", file=web_cgi)
    except Exception as e:
        raise
    finally:
        c.close()
        conn.close()


def main():
Example #32
import codecs
import time

import datetime

import sys
from flask import Flask
from flask_script import Manager, Option
from sys import version_info
from app.server import app, db
from app.worker import backgroundWorker
from app.models import User

UTF8Writer = codecs.getwriter('utf8')
sys.stdout = UTF8Writer(sys.stdout)

app = Flask(__name__)

manager = Manager(app, False)


def getInput(text):
    py3 = version_info[0] > 2  # True when the Python major version is greater than 2

    if py3:
      return input(text)
    else:
      return raw_input(text)


@manager.command
Example #33
            plt.grid(True)

        #plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=1)
        plt.show()

        #time.sleep(15)
        #plt.close('all')

    #def plot(self, data):


logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG)
locale.setlocale(locale.LC_NUMERIC, '')

# change the output encoding to utf8
sys.stdout = codecs.getwriter('utf8')(sys.stdout)

parser = argparse.ArgumentParser(description='Graph data')
parser.add_argument('-price_min',
                    dest='price_min',
                    action='store',
                    type=float,
                    default=None,
                    help='TODO')
parser.add_argument('-price_max',
                    dest='price_max',
                    action='store',
                    type=float,
                    default=None,
                    help='TODO')
Example #34
import codecs
import re

from trans import translate

try:
    import debug
    from pdb import set_trace as breakpoint
except:
    pass

debug_mode = False
# debug_mode = True

UTF8READER = codecs.getreader('utf8')
UTF8WRITER = codecs.getwriter('utf8')

EPILOG = """
Dependencies: python 2.6+ (maybe earlier). If the .md tag is used, install the
markdown module (http://pythonhosted.org/Markdown/index.html).
"""

FORMAT_HELP = """PREPROCESSOR INPUT SYNTAX

Notation: Grammar rules are of the form ELEMENT_TYPE -> GRAMMAR_EXPRESSION.
A grammar expression may contain the following notation.
{...} grouping, [...] optional, | or, +/* one/zero or more of the preceding form.
,.. non-empty comma-separated sequence of preceding form.
Spaces are not significant in grammar rules. Sequences are non-empty.
BOL/EOL beginning/end of line. EOF end of file.
Example #35
def get_metrics(hparams, model_fn, ckpt=None):
    """Run inference and compute metrics."""
    pred_estimator = tf.estimator.Estimator(model_fn=model_fn,
                                            model_dir=hparams.output_dir)

    benchmark_hook = BenchmarkHook(hparams.infer_batch_size)

    predictions = pred_estimator.predict(make_input_fn(
        hparams, tf.contrib.learn.ModeKeys.INFER),
                                         checkpoint_path=ckpt,
                                         hooks=[benchmark_hook])
    translations = []
    output_tokens = []
    beam_id = 0
    for prediction in predictions:
        # get the top translation.
        if beam_id == 0:
            for sent_id in range(hparams.infer_batch_size):
                if sent_id >= prediction["predictions"].shape[0]:
                    break
                trans, output_length = nmt_utils.get_translation(
                    prediction["predictions"],
                    sent_id=sent_id,
                    tgt_eos=hparams.eos,
                    subword_option=hparams.subword_option)
                translations.append(trans)
                output_tokens.append(output_length)
        beam_id += 1
        if beam_id == hparams.beam_width:
            beam_id = 0

    trans_file = os.path.join(
        hparams.output_dir, "newstest2014_out_{}.tok.de".format(
            pred_estimator.get_variable_value(tf.GraphKeys.GLOBAL_STEP)))
    trans_dir = os.path.dirname(trans_file)
    if not tf.gfile.Exists(trans_dir):
        tf.gfile.MakeDirs(trans_dir)
    tf.logging.info("Writing to file %s" % trans_file)
    with codecs.getwriter("utf-8")(tf.gfile.GFile(trans_file,
                                                  mode="wb")) as trans_f:
        trans_f.write("")  # Write empty string to ensure file is created.
        for translation in translations:
            trans_f.write((translation + b"\n").decode("utf-8"))

    # Evaluation
    output_dir = os.path.join(pred_estimator.model_dir, "eval")
    tf.gfile.MakeDirs(output_dir)
    summary_writer = tf.summary.FileWriter(output_dir)

    ref_file = "%s.%s" % (hparams.test_prefix, hparams.tgt)
    # Hardcoded.
    metric = "bleu"
    score = get_sacrebleu(trans_file, hparams.detokenizer_file)

    misc_utils.print_out("bleu is %.5f" % score)
    with tf.Graph().as_default():
        summaries = []
        summaries.append(tf.Summary.Value(tag=metric, simple_value=score))
    tf_summary = tf.Summary(value=list(summaries))
    summary_writer.add_summary(
        tf_summary,
        pred_estimator.get_variable_value(tf.GraphKeys.GLOBAL_STEP))

    summary_writer.close()
    return score, benchmark_hook.get_average_speed(), sum(output_tokens)
Example #36
def main():
    bot = False

    def handle_sigint(*args):
        raise SIGINTRecieved
    signal.signal(signal.SIGINT, handle_sigint)

    def initialize_task(bot, config):
        tree = TreeConfigBuilder(bot, config.raw_tasks).build()
        bot.workers = tree

    def initialize(config):
        from pokemongo_bot.datastore import Datastore

        ds = Datastore(conn_str='/data/{}.db'.format(config.username))
        for directory in ['pokemongo_bot', 'pokemongo_bot/cell_workers']:
            ds.migrate(directory + '/migrations')

        bot = PokemonGoBot(ds.get_connection(), config)

        return bot

    def setup_logging(config):
        log_level = logging.ERROR

        if config.debug:
            log_level = logging.DEBUG

        logging.getLogger("requests").setLevel(log_level)
        logging.getLogger("websocket").setLevel(log_level)
        logging.getLogger("socketio").setLevel(log_level)
        logging.getLogger("engineio").setLevel(log_level)
        logging.getLogger("socketIO-client").setLevel(log_level)
        logging.getLogger("pgoapi").setLevel(log_level)
        logging.getLogger("rpc_api").setLevel(log_level)

        if config.logging:
            logging_format = '%(message)s'
            logging_format_options = ''

            if ('show_log_level' not in config.logging) or config.logging['show_log_level']:
                logging_format = '[%(levelname)s] ' + logging_format
            if ('show_process_name' not in config.logging) or config.logging['show_process_name']:
                logging_format = '[%(name)10s] ' + logging_format
            if ('show_thread_name' not in config.logging) or config.logging['show_thread_name']:
                logging_format = '[%(threadName)s] ' + logging_format
            if ('show_datetime' not in config.logging) or config.logging['show_datetime']:
                logging_format = '[%(asctime)s] ' + logging_format
                logging_format_options = '%Y-%m-%d %H:%M:%S'

            formatter = Formatter(logging_format,logging_format_options)
            for handler in logging.root.handlers[:]:
                handler.setFormatter(formatter)

    def start_bot(bot, config):
        bot.start()
        initialize_task(bot, config)
        bot.metrics.capture_stats()
        bot.health_record = BotEvent(config)
        return bot

    def get_commit_hash():
        try:
            hash = subprocess.check_output(['git', 'rev-parse', 'HEAD'],
                                           stderr=subprocess.STDOUT)
            if all(c in string.hexdigits for c in hash[:-1]):
                with open('version', 'w') as f:
                    f.write(hash)
        except:
            pass

        if not os.path.exists('version'):
            return 'unknown'

        with open('version') as f:
            return f.read()[:8]

    try:
        sys.stdout = codecs.getwriter('utf8')(sys.stdout)
        sys.stderr = codecs.getwriter('utf8')(sys.stderr)

        logger.info('PokemonGO Bot v1.0')
        logger.info('commit: ' + get_commit_hash())

        config, config_file = init_config()
        if not config:
            return

        logger.info('Configuration initialized')
        health_record = BotEvent(config)
        health_record.login_success()

        setup_logging(config)

        finished = False

        while not finished:
            wait_time = config.reconnecting_timeout * 60
            try:
                bot = initialize(config)
                bot = start_bot(bot, config)
                config_changed = check_mod(config_file)

                bot.event_manager.emit(
                    'bot_start',
                    sender=bot,
                    level='info',
                    formatted='Starting bot...'
                )

                while True:
                    bot.tick()
                    if config.live_config_update_enabled and config_changed():
                        logger.info('Config changed! Applying new config.')
                        config, _ = init_config()

                        if config.live_config_update_tasks_only:
                            initialize_task(bot, config)
                        else:
                            bot = initialize(config)
                            bot = start_bot(bot, config)

            except KeyboardInterrupt:
                bot.event_manager.emit(
                    'bot_exit',
                    sender=bot,
                    level='info',
                    formatted='Exiting bot.'
                )
                finished = True
                report_summary(bot)

            except NotLoggedInException:
                bot.event_manager.emit(
                    'api_error',
                    sender=bot,
                    level='info',
                    formatted='Not logged in, reconnecting in {:d} seconds'.format(wait_time)
                )
                time.sleep(wait_time)
            except ServerBusyOrOfflineException:
                bot.event_manager.emit(
                    'api_error',
                    sender=bot,
                    level='info',
                    formatted='Server busy or offline'
                )
                time.sleep(wait_time)
            except ServerSideRequestThrottlingException:
                bot.event_manager.emit(
                    'api_error',
                    sender=bot,
                    level='info',
                    formatted='Server is throttling, reconnecting in 30 seconds'
                )
                time.sleep(30)
            except PermaBannedException:
                bot.event_manager.emit(
                    'api_error',
                    sender=bot,
                    level='info',
                    formatted='Probably permabanned, Game Over ! Play again at https://club.pokemon.com/us/pokemon-trainer-club/sign-up/'
                )
                time.sleep(36000)
            except NoPlayerPositionSetException:
                bot.event_manager.emit(
                    'api_error',
                    sender=bot,
                    level='info',
                    formatted='No player position set'
                )
                time.sleep(wait_time)

    except GeocoderQuotaExceeded:
        raise Exception("Google Maps API key over requests limit.")
    except SIGINTRecieved:
        if bot:
            bot.event_manager.emit(
                'bot_interrupted',
                sender=bot,
                level='info',
                formatted='Bot caught SIGINT. Shutting down.'
            )
            report_summary(bot)

    except Exception as e:
        # always report session summary and then raise exception
        if bot:
            report_summary(bot)

        raise
    finally:
        # Cache here on SIGTERM, or Exception.  Check data is available and worth caching.
        if bot:
            if len(bot.recent_forts) > 0 and bot.recent_forts[-1] is not None and bot.config.forts_cache_recent_forts:
                cached_forts_path = os.path.join(
                    _base_dir, 'data', 'recent-forts-%s.json' % bot.config.username
                )
                try:
                    with open(cached_forts_path, 'w') as outfile:
                        json.dump(bot.recent_forts, outfile)
                    bot.event_manager.emit(
                        'cached_fort',
                        sender=bot,
                        level='debug',
                        formatted='Forts cached.'
                    )
                except IOError as e:
                    bot.event_manager.emit(
                        'error_caching_forts',
                        sender=bot,
                        level='debug',
                        formatted='Error caching forts for {path}',
                        data={'path': cached_forts_path}
                        )
Example #37
    Return True if a sum is simple.

    A sum is simple if its types have no fields, e.g.
    unaryop = Invert | Not | UAdd | USub
    """
    for t in sum.types:
        if t.fields:
            return False
    return True


#------------------------------------------------------------------------
# File Handling
#------------------------------------------------------------------------

StreamWriter = codecs.getwriter('UTF-8')


class FileAllocator(object):
    def __init__(self, output_dir=None):
        self.output_dir = output_dir

        # file_name -> file
        self.allocated_files = {}

    def open_sourcefile(self, name):
        "Allocate a file and save it in allocated_files"

    def close(self):
        for file in self.allocated_files.itervalues():
            file.close()
Example #38
def save_hparams(out_dir, hparams):
    """Save hparams."""
    hparams_file = os.path.join(out_dir, "hparams")
    print_out("  saving hparams to %s" % hparams_file)
    with codecs.getwriter("utf-8")(tf.gfile.GFile(hparams_file, "wb")) as f:
        f.write(hparams.to_json())
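The reading side is symmetric: codecs.getreader can decode the same file back before parsing it. A sketch of a possible loader, assuming the TF 1.x tf.gfile API used above and that the file holds the JSON written by save_hparams (the helper name is an assumption, not taken from the original project):

import codecs
import json
import os

import tensorflow as tf

def load_hparams_json(out_dir):
    # Decode the UTF-8 hparams file written by save_hparams and parse the JSON payload.
    hparams_file = os.path.join(out_dir, "hparams")
    with codecs.getreader("utf-8")(tf.gfile.GFile(hparams_file, "rb")) as f:
        return json.load(f)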
Example #39
    try:
        with Image.open(tempfname) as img:
            return pytesseract.image_to_string(img)
        # f.write("============================\nIMAGE_STR\n============================\n")
        # f.write(pytesseract.image_to_string(img).encode('utf-8').decode('utf-8'))
        # f.write("\n\n============================\nBOUNDING BOXES\n============================\n")
        # f.write(pytesseract.image_to_boxes(img).encode('utf-8').decode('utf-8'))
        # f.write("\n\n============================\nIMAGE_DATA\n============================\n")
        # f.write(pytesseract.image_to_data(img).encode('utf-8').decode('utf-8'))

        # print("============================\nSCRIPT INFO\n============================")
        # print(pytesseract.image_to_osd(img).encode('utf-8').decode('utf-8'))
    except IOError as e:
        print('I/O error occurred: ' + str(e))
    finally:
        remove(tempfname)


if __name__ == "__main__":
    if sys.stdout.encoding != 'cp850':
        sys.stdout = codecs.getwriter('cp850')(sys.stdout.buffer, 'strict')
    if sys.stderr.encoding != 'cp850':
        sys.stderr = codecs.getwriter('cp850')(sys.stderr.buffer, 'strict')

    parser = argparse.ArgumentParser()
    parser.add_argument("images", nargs='+', help="Image filename")
    args = parser.parse_args()

    runOCR(args.images)
Example #40
    def _print_figure(self,
                      outfile,
                      format,
                      *,
                      dpi,
                      dsc_comments,
                      orientation,
                      papertype,
                      bbox_inches_restore=None):
        """
        Render the figure to a filesystem path or a file-like object.

        Parameters are as for `.print_figure`, except that *dsc_comments* is a
        string containing Document Structuring Convention comments,
        generated from the *metadata* parameter to `.print_figure`.
        """
        is_eps = format == 'eps'
        if not (isinstance(outfile, (str, os.PathLike))
                or is_writable_file_like(outfile)):
            raise ValueError("outfile must be a path or a file-like object")

        # find the appropriate papertype
        width, height = self.figure.get_size_inches()
        if papertype == 'auto':
            papertype = _get_papertype(
                *orientation.swap_if_landscape((width, height)))
        paper_width, paper_height = orientation.swap_if_landscape(
            papersize[papertype])

        if mpl.rcParams['ps.usedistiller']:
            # distillers improperly clip eps files if pagesize is too small
            if width > paper_width or height > paper_height:
                papertype = _get_papertype(
                    *orientation.swap_if_landscape((width, height)))
                paper_width, paper_height = orientation.swap_if_landscape(
                    papersize[papertype])

        # center the figure on the paper
        xo = 72 * 0.5 * (paper_width - width)
        yo = 72 * 0.5 * (paper_height - height)

        llx = xo
        lly = yo
        urx = llx + self.figure.bbox.width
        ury = lly + self.figure.bbox.height
        rotation = 0
        if orientation is _Orientation.landscape:
            llx, lly, urx, ury = lly, llx, ury, urx
            xo, yo = 72 * paper_height - yo, xo
            rotation = 90
        bbox = (llx, lly, urx, ury)

        self._pswriter = StringIO()

        # mixed mode rendering
        ps_renderer = RendererPS(width, height, self._pswriter, imagedpi=dpi)
        renderer = MixedModeRenderer(self.figure,
                                     width,
                                     height,
                                     dpi,
                                     ps_renderer,
                                     bbox_inches_restore=bbox_inches_restore)

        self.figure.draw(renderer)

        def print_figure_impl(fh):
            # write the PostScript headers
            if is_eps:
                print("%!PS-Adobe-3.0 EPSF-3.0", file=fh)
            else:
                print(
                    f"%!PS-Adobe-3.0\n"
                    f"%%DocumentPaperSizes: {papertype}\n"
                    f"%%Pages: 1\n",
                    end="",
                    file=fh)
            print(
                f"{dsc_comments}\n"
                f"%%Orientation: {orientation.name}\n"
                f"{get_bbox_header(bbox)[0]}\n"
                f"%%EndComments\n",
                end="",
                file=fh)

            Ndict = len(psDefs)
            print("%%BeginProlog", file=fh)
            if not mpl.rcParams['ps.useafm']:
                Ndict += len(ps_renderer._character_tracker.used)
            print("/mpldict %d dict def" % Ndict, file=fh)
            print("mpldict begin", file=fh)
            print("\n".join(psDefs), file=fh)
            if not mpl.rcParams['ps.useafm']:
                for font_path, chars \
                        in ps_renderer._character_tracker.used.items():
                    if not chars:
                        continue
                    fonttype = mpl.rcParams['ps.fonttype']
                    # Can't use more than 255 chars from a single Type 3 font.
                    if len(chars) > 255:
                        fonttype = 42
                    fh.flush()
                    if fonttype == 3:
                        fh.write(_font_to_ps_type3(font_path, chars))
                    else:  # Type 42 only.
                        _font_to_ps_type42(font_path, chars, fh)
            print("end", file=fh)
            print("%%EndProlog", file=fh)

            if not is_eps:
                print("%%Page: 1 1", file=fh)
            print("mpldict begin", file=fh)

            print("%s translate" % _nums_to_str(xo, yo), file=fh)
            if rotation:
                print("%d rotate" % rotation, file=fh)
            print("%s clipbox" % _nums_to_str(width * 72, height * 72, 0, 0),
                  file=fh)

            # write the figure
            print(self._pswriter.getvalue(), file=fh)

            # write the trailer
            print("end", file=fh)
            print("showpage", file=fh)
            if not is_eps:
                print("%%EOF", file=fh)
            fh.flush()

        if mpl.rcParams['ps.usedistiller']:
            # We are going to use an external program to process the output.
            # Write to a temporary file.
            with TemporaryDirectory() as tmpdir:
                tmpfile = os.path.join(tmpdir, "tmp.ps")
                with open(tmpfile, 'w', encoding='latin-1') as fh:
                    print_figure_impl(fh)
                if mpl.rcParams['ps.usedistiller'] == 'ghostscript':
                    _try_distill(gs_distill,
                                 tmpfile,
                                 is_eps,
                                 ptype=papertype,
                                 bbox=bbox)
                elif mpl.rcParams['ps.usedistiller'] == 'xpdf':
                    _try_distill(xpdf_distill,
                                 tmpfile,
                                 is_eps,
                                 ptype=papertype,
                                 bbox=bbox)
                _move_path_to_path_or_stream(tmpfile, outfile)

        else:  # Write directly to outfile.
            with cbook.open_file_cm(outfile, "w", encoding="latin-1") as file:
                if not file_requires_unicode(file):
                    file = codecs.getwriter("latin-1")(file)
                print_figure_impl(file)
Example #41
from core import globalvar
from core.utils import _
import wx
import wx.lib.mixins.listctrl as listmix

from grass.script import core as grass

from core.gcmd import GMessage, GError, DecodeString, RunCommand
from core.utils import GetListOfLocations, GetListOfMapsets
from location_wizard.dialogs import RegionDef
from gui_core.dialogs import TextEntryDialog
from gui_core.widgets import GenericValidator, StaticWrapText
from gui_core.wrap import Button, ListCtrl, StaticText, \
    StaticBox, TextCtrl

sys.stderr = codecs.getwriter('utf8')(sys.stderr)


class GRASSStartup(wx.Frame):
    exit_success = 0
    # 2 is file not found from python interpreter
    exit_user_requested = 5

    """GRASS start-up screen"""

    def __init__(self, parent=None, id=wx.ID_ANY,
                 style=wx.DEFAULT_FRAME_STYLE):

        #
        # GRASS variables
        #
Example #42
# coding: utf-8
import os, re
import json
import requests

import codecs, sys
sys.stdout = codecs.getwriter("UTF-8")(sys.stdout)

# this scrapes oibs64 for all the course data.
# see data_spec.md for interpreting out_file.
# be aware- spits a lot of output to stdout.

out_file = "data.json"

oibs_url = "https://oibs2.metu.edu.tr/View_Program_Course_Details_64/main.php"

# stuff for department-izing course codes.
# a course ID like 5720172 does not become aee172 on its own.
prefixes = {
    '219': u'GENE',
    '956': u'OCEA',
    '450': u'FLE',
    '612': u'PERS',
    '451': u'TEFL',
    '810': u'GWS',
    '811': u'UPL',
    '814': u'SA',
    '815': u'ARS',
    '816': u'MCS',
    '817': u'FPSY',
    '453': u'PES',
Example #43
#!/usr/bin/env python3
import cgi, codecs, sys

sys.stdout = codecs.getwriter('utf8')(sys.stdout.buffer) # Support Chinese

# Variables
form = cgi.FieldStorage()
template = open("../information.html", 'r')  # An html template for information
content = template.read()


# Functions
def getVal(form, attr):  # Get value from html form
    if form.getvalue(attr):
        return form.getvalue(attr)
    return ""


def arrayToString(arr):  # Convert array to string
    if type(arr) is str:
        return arr
    string = ""
    for element in arr:
        string += element
        if element != arr[-1]:  # Check if it is the last element of array
            string += ", "
    return string


# Main function
if __name__ == '__main__':
Example #44
# -*- coding: utf-8 -*-

# initialize Django API

import os, sys, codecs
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'trimco.settings')

import django
django.setup()

sys.stdout = codecs.getwriter("utf-8")(sys.stdout.detach(
))  # for printing; parasol server for some curious reason uses latin-1 locale

# import some models

from info.models import Speaker
from corpora.models import Recording

import xml.etree.cElementTree as ET

fields = ('string_id', 'sex', 'year_of_birth', 'language', 'education',
          'location')  # fields we want to include in an XML file

if __name__ == '__main__':

    speakers = Speaker.objects.all()  # get all speakers

    meta = ET.Element("meta")

    for speaker in speakers:
Example #45
0
import pexpect
import re
import time
import aggreg
import switches
import csv
import tokenize
import base64
import checkfunc
from netaddr import IPNetwork, IPAddress
import services

t1 = time.time()
opt_diag = None
onu_status = None
sys.stdout = codecs.getwriter("utf-8")(sys.stdout.detach())

script_name = 'check.py'
ifload = []   # array holding per-port load values

def checkpolicy(ip):
    print(ip)
    junroutes = '/var/www/check/cgi-bin/scripts/shaper_routes.txt'
    output = open(junroutes)
    policy = 'no ip policy INET_CHANNEL2'
    for route in output:
        net, gw = route.split()[2], route.split()[3]
        #gw = gw[:-1]
        # check whether the ip falls within this network
        if IPAddress(ip) in IPNetwork(net):
            if gw == '10.10.10.10': #Gateway Cisco 4948 Bykova13
Example #46
0
def main():
    def usage():
        sys.stderr.write(
            """usage: {0} [[-e|encoding <encoding>] -c|--clean|-s|--smudge] path
""".format(sys.argv[0]))
        return

    # end of usage()

    # main function body
    try:
        opts, args = getopt.getopt(sys.argv[1:], "ce:hs",
                                   ['clean', 'encoding=', 'smudge', 'help'])
    except getopt.GetoptError as err:
        sys.stderr.write(str(err))
        usage()
        sys.exit(2)

    smudge = True
    enc = None
    hf = False
    for (o, a) in opts:
        if o in ('-c', '--clean'):
            smudge = False
        elif o in ('-e', '--encoding'):
            enc = a
        elif o in ('-h', '--help'):
            hf = True
        elif o in ('-s', '--smudge'):
            smudge = True
        else:
            assert False, "unhandled option {0}".format(o)

    if hf:
        usage()
        sys.exit(0)

    if len(args) != 1:
        sys.stderr.write('error: just one file path is needed\n')
        usage()
        sys.exit(1)

    if enc is not None:
        setdefaultencoding(enc)

    if isinstance(sys.stdin, io.TextIOWrapper):
        try:
            sys.stdin.reconfigure(encoding=_default_encoding,
                                  errors='surrogateescape')
            sys.stdout.reconfigure(encoding=_default_encoding,
                                   errors='surrogateescape')
            stdin = sys.stdin
            stdout = sys.stdout
        except AttributeError:
            stdin = codecs.getreader(_default_encoding)(sys.stdin.buffer,
                                                        'surrogateescape')
            stdout = codecs.getwriter(_default_encoding)(sys.stdout.buffer,
                                                         'surrogateescape')
    else:
        stdin = sys.stdin
        stdout = sys.stdout

    if smudge:
        substkw(stdin, stdout, expand_kwdic(args[0], read_conf(args[0])))
    else:
        unsubstkw(stdin, stdout, read_conf(args[0]))
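The reconfigure-then-fallback logic above can be lifted into a small helper. The sketch below is only an illustration of that pattern: it hard-codes "utf-8" where the snippet uses _default_encoding, and text_stdio is a hypothetical name.

import codecs
import io
import sys

def text_stdio(encoding="utf-8", errors="surrogateescape"):
    """Return (stdin, stdout) as text streams using the given encoding."""
    if isinstance(sys.stdin, io.TextIOWrapper):
        try:
            # Python 3.7+: re-encode the existing wrappers in place.
            sys.stdin.reconfigure(encoding=encoding, errors=errors)
            sys.stdout.reconfigure(encoding=encoding, errors=errors)
            return sys.stdin, sys.stdout
        except AttributeError:
            # Older interpreters: wrap the underlying binary buffers instead.
            return (codecs.getreader(encoding)(sys.stdin.buffer, errors),
                    codecs.getwriter(encoding)(sys.stdout.buffer, errors))
    return sys.stdin, sys.stdout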
Example #47
0
def delta(orgn, dest, searchdate, searchkey):
    if not DEV_LOCAL:
        db = customfunction.dbconnection()
        cursor = db.cursor()
        db.set_character_set('utf8')
    url = "http://www.delta.com/"
    searchid = str(searchkey)
    currentdatetime = datetime.datetime.now()
    stime = currentdatetime.strftime('%Y-%m-%d %H:%M:%S')
    sys.stdout = codecs.getwriter('utf-8')(sys.stdout)

    driver = webdriver.PhantomJS(service_args=[
        '--ignore-ssl-errors=true', '--ssl-protocol=any', '--load-images=false'
    ],
                                 service_log_path='/tmp/ghostdriver.log')

    driver.set_window_size(1120, 1080)

    # driver = webdriver.Firefox()

    def storeFlag(searchkey, stime):
        if not DEV_LOCAL:
            cursor.execute(
                "INSERT INTO pexproject_flightdata (flighno,searchkeyid,scrapetime,stoppage,stoppage_station,origin,destination,duration,maincabin,maintax,firstclass,firsttax,business,businesstax,cabintype1,cabintype2,cabintype3,datasource,departdetails,arivedetails,planedetails,operatedby,economy_code,business_code,first_code) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);",
                ("flag", str(searchkey), stime, "flag", "test", "flag", "flag",
                 "flag", "0", "0", "0", "0", "0", "0", "flag", "flag", "flag",
                 "delta", "flag", "flag", "flag", "flag", "flag", "flag",
                 "flag"))
            db.commit()
        driver.quit()

    try:
        driver.get(url)
        time.sleep(1)
        flg = 0
        pageStatus = ''
        while flg < 15 and pageStatus != 'complete':
            time.sleep(1)
            print "flg", flg
            pageStatus = driver.execute_script('return document.readyState;')
            print "pageStatus", pageStatus
            flg = flg + 1
        WebDriverWait(driver, 6).until(
            EC.presence_of_element_located((By.ID, "oneWayBtn")))
        oneway = driver.find_element_by_id('oneWayBtn')
        #oneway.click()
        driver.execute_script("arguments[0].click();", oneway)
        driver.execute_script(
            "document.getElementById('originCity').setAttribute('value', '" +
            orgn.strip() + "')")
        # origin = driver.find_element_by_id("originCity")
        # origin.clear()
        # origin.send_keys(orgn.strip())
        driver.execute_script(
            "document.getElementById('destinationCity').setAttribute('value', '"
            + dest.strip() + "')")
        # destination = driver.find_element_by_id("destinationCity")
        # destination.send_keys(dest.strip())
        ddate = driver.find_element_by_id("departureDate")
        driver.execute_script(
            "document.getElementById('departureDate').setAttribute('value', '"
            + str(searchdate) + "')")
        milebtn = driver.find_element_by_id("milesBtn")
        milebtn.click()
        driver.find_element_by_id("findFlightsSubmit").send_keys(Keys.ENTER)

    except:
        print "before data page"
        storeFlag(searchkey, stime)
        return searchkey
    time.sleep(1)
    try:
        WebDriverWait(driver, 5).until(
            EC.presence_of_element_located((By.ID, "submitAdvanced")))
        print "no data"
        storeFlag(searchkey, stime)
        return searchkey
    except:
        print "Data found"
    try:
        # driver.save_screenshot('/root/out_enter.png');
        WebDriverWait(driver, 15).until(
            EC.presence_of_element_located((By.ID, "showAll-footer")))
        print "More than one page"
        driver.execute_script("""
            DWRHandler.currentPage = -1;
            var _shopInputDo=shopInputDo;
            shoppingUtil.scrollWindow("top");
            FilterFunctions.showFilterMsg();
            FlightUtil.emptyResults();
            ResummarizeFlightResultsDWR.pageResults(DWRHandler.currentPage, _shopInputDo.currentSessionCheckSum, delta.airShopping.cacheKey, {
            async: true,
            callback: function(searchResults) {
                if (searchResults != null) {
                    var jsonData = {};
                    jsonData['jsonobj'] = JSON.stringify(searchResults);
                    var cabininfo = document.getElementsByClassName('tblHeadUp')[0].innerHTML;
                    jsonData['cabinTypes'] = cabininfo;
                    localStorage.setItem('deltaData', JSON.stringify(jsonData));
                    var element = document.createElement('div');
                    element.id = "submitAdvanced";
                    element.appendChild(document.createTextNode("text"));
                    document.body.appendChild(element);
                    throw new Error("Results found");
                    if (searchResults.errorFwdURL == null) {
                        jsonResultPopulation(searchResults);
                        paginationPopulation(searchResults);
                        if (shoppingUtil.isIE8()) {
                            if (DWRHandler.currentPage == -1) {
                                ieTimeout = setTimeout("RenderTemplate.renderResult();FilterFunctions.hideFilterMsg();RenderTemplate.adjustHeight();", 200);
                            } else {
                                ieTimeout = setTimeout("RenderTemplate.renderResult();FilterFunctions.hideFilterMsg();RenderTemplate.adjustHeight();", 100);
                            }
                        } else {
                            RenderTemplate.renderResult();
                            FilterFunctions.hideFilterMsg();
                            RenderTemplate.adjustHeight();
                        }
                        if (DWRHandler.currentPage == -1) {
                            $("#showAll").hide();
                            $("#showAll-footer").hide();
                        }
                        contienuedOnload(false);
                        if (searchResults.debugInfo != null && ((typeof(printRequestResponse) !== "undefined") && printRequestResponse == true)) {
                            $("#requestXml").text(searchResults.debugInfo.itaRequest);
                            $("#responceXml").text(searchResults.debugInfo.itaResponse);
                            $("#reqRes").show();
                        }
                    } else {
                        window.location.replace(searchResults.errorFwdURL);
                    }
                } else {
                    FilterFunctions.errorHandling();
                }
                $(".tableHeaderHolderFareBottom.return2Top").show();
            },
            exceptionHandler: FilterFunctions.errorHandling
        });
        """)
    except:
        print "single page"
        try:
            driver.execute_script("""
            var sortBy = "deltaScheduleAward" ;
            SearchFlightResultsDWR.searchResults(currentSessionCheckSum, sortBy[0], delta.airShopping.numberOfColumnsToRequest, delta.airShopping.cacheKey, {
                async: true,
                timeout: 65000,
                callback: function(searchResults) {
                        var jsonData = {};
                        
                        jsonData['jsonobj'] = JSON.stringify(searchResults);
                        var cabininfo = document.getElementsByClassName('tblHeadUp')[0].innerHTML;
                        jsonData['cabinTypes'] = cabininfo;
                        localStorage.setItem('deltaData', JSON.stringify(jsonData));
                        var element = document.createElement('div');
                        element.id = "submitAdvanced";
                        element.appendChild(document.createTextNode("text"));
                        document.body.appendChild(element);
                        throw new Error("Results found");
                        
                    if (searchResults.errorFwdURL == null || searchResults.errorFwdURL == "") {
                        flightResultsObj.isDOMReady(searchResults, action, false);
                        FilterFunctions.hideFilterMsg();
                    } else {
                    
                        flightResultsObj.isDOMReady(searchResults, false, true);
                    }
                    if (!action) {
                        Wait.hide();
                        $(".tableHeaderHolderFareBottom").show();
                        $("#nextGenAirShopping .tableHeaderHolder").show();
                    }
                },
                errorHandler: function(msg, exc) {
                    shoppingUtil.errorHandler(msg, exc);
                },
                exceptionHandler: function(msg, exc) {
                    (action) ? FilterFunctions.hideFilterMsg(): "";
                    shoppingUtil.exceptionHandler(msg, exc);
                }
            });
            
            """)
        except:
            storeFlag(searchkey, stime)
            return searchkey
    try:
        WebDriverWait(driver, 10).until(
            EC.presence_of_element_located((By.ID, "submitAdvanced")))
        result = driver.execute_script(
            """ return localStorage.getItem('deltaData'); """)
        deltaObj = json.loads(result)
        a_file = open('1.json', 'w')
        a_file.write(json.dumps(deltaObj, indent=4))
        # a_file.write(result.encode('utf8'))
        # print deltaObj, '@@@@@@'
        # return
        searchResult = json.loads(deltaObj['jsonobj'])
        # a_file = open('2.json', 'w')
        # a_file.write(json.dumps(searchResult, indent=4))
        # return

        cabinhead = "<tr>" + deltaObj['cabinTypes'] + "</tr>"
        soup = BeautifulSoup(cabinhead, "xml")
        tds = soup.findAll("td")
        pricecol = soup.findAll("a", {"class": "tblHeadBigtext lnkCabinName"})
        if len(pricecol) < 1:
            pricecol = soup.findAll("label", {"class": "tblHeadBigtext"})
        flightData = searchResult["itineraries"]
    except:
        # raise
        storeFlag(searchkey, stime)
        return searchkey

    values_string = []
    for i in range(0, len(flightData)):
        totalFareDetails = flightData[i]['totalFare']
        slicesDetails = flightData[i]['slices']
        departDetail = []
        ariveDetail = []
        flightDetail = []
        operatorDetail = []
        SourceCOde = ''
        tripDuration = ''
        destinationCode = ''
        arivalTime = ''
        departTime = ''
        flightNo = ''
        for k in range(0, len(slicesDetails)):
            tripDuration = slicesDetails[k]['duration']
            SourceCOde = slicesDetails[k]['sliceOrigin']['airportCode']
            destinationCode = slicesDetails[k]['sliceDestination'][
                'airportCode']
            arivalTime = slicesDetails[k]['sliceArrivalTime']
            departTime = slicesDetails[k]['sliceDepartureTime']
            layover = slicesDetails[k]['flights']
            "**************************************** FLIGHT DETAILS ****************************************"
            FlightFlag = 0
            for m in range(0, len(layover)):
                legData = layover[m]['legs']
                for n in range(0, len(legData)):
                    legdetail = legData[n]
                    if legdetail:
                        fromAirport = ''
                        destAirport = ''
                        "=========================LEG INFO=================================="
                        if 'origin' in legdetail:
                            orgnCode = legdetail['origin']['airportCode']
                            cityname = legdetail['origin']['nearByCities'][0][
                                'name']
                            cityCode = legdetail['origin']['nearByCities'][0][
                                'country']['region']['code']
                            fromAirport = orgnCode

                        if 'destination' in legdetail:
                            destCode = legdetail['destination']['airportCode']
                            destcityname = legdetail['destination'][
                                'nearByCities'][0]['name']
                            destcityCode = legdetail['destination'][
                                'nearByCities'][0]['country']['region']['code']
                            destAirport = destCode

                        duration = legdetail['duration']
                        schedDepartureTime = legdetail['schedDepartureTime']
                        schedDepartureDate = legdetail['schedDepartureDate']
                        schedArrivalTime = legdetail['schedArrivalTime']
                        schedArrivalDate = legdetail['schedArrivalDate']

                        '@@@@@@@ departDetails format @@@@@@@'
                        departinfo_time = schedDepartureDate + " " + schedDepartureTime
                        departinfo_time = datetime.datetime.strptime(
                            departinfo_time, '%a %b %d %Y %I:%M%p')
                        departinfo_time = departinfo_time.strftime(
                            '%Y/%m/%d %H:%M')

                        if not DEV_LOCAL:
                            fromAirport = customfunction.get_airport_detail(
                                fromAirport) or fromAirport
                        fromDetail = departinfo_time + " | from  " + fromAirport
                        departDetail.append(fromDetail)
                        departinfo_time = schedArrivalDate + " " + schedArrivalTime
                        departinfo_time = datetime.datetime.strptime(
                            departinfo_time, '%a %b %d %Y %I:%M%p')
                        departinfo_time = departinfo_time.strftime(
                            '%Y/%m/%d %H:%M')

                        if not DEV_LOCAL:
                            destAirport = customfunction.get_airport_detail(
                                destAirport) or destAirport
                        toDetails = departinfo_time + " | at " + destAirport
                        ariveDetail.append(toDetails)
                        aircraft = legdetail['aircraft']['shortName']
                        airlineCode = legdetail['marketAirline']['airline'][
                            'airlineCode']
                        flightNumber = legdetail['flightNumber']
                        if FlightFlag == 0:
                            flightNo = airlineCode + " " + str(flightNumber)

                        # --- NORM ---
                        if aircraft[:3] == 'MD-':
                            aircraft = 'McDonnell Douglas MD ' + aircraft[3:]
                        elif aircraft[:3] == 'CRJ':
                            aircraft = 'Bombardier ' + aircraft
                        flightFormat = airlineCode + " " + str(
                            flightNumber
                        ) + " | " + aircraft + " (" + duration + ")"
                        flightDetail.append(flightFormat)
                        operatedby = legdetail['operatingAirline']['airline'][
                            'airlineName']
                        operatorDetail.append(operatedby)
                        FlightFlag = FlightFlag + 1
        "====================Fare info ================================="

        fareFlag = 0
        cabintype1 = ''
        cabintype2 = ''
        cabintype3 = ''
        ecofare = 0
        echoTax = 0
        bussfare = 0
        busstax = 0
        firstFare = 0
        firsttax = 0
        ecofareClass = ''
        bussFareClass = ''
        firstFareClass = ''
        eco_fare_code = ''
        bus_fare_code = ''
        first_fare_code = ''
        for j in range(0, len(totalFareDetails)):
            cabintype = ''
            miles = 0
            taxes = 0
            fareCode = []
            if totalFareDetails[j]['cabinName'] != None:
                tax = 0
                fareCodeHolder = totalFareDetails[j]['miscFlightInfos']
                for c in range(0, len(fareCodeHolder)):
                    fareCabin = fareCodeHolder[c]['cabinName']
                    bookingCode = fareCodeHolder[c]['displayBookingCode']
                    fareCode.append(bookingCode)
                    bookingCode = bookingCode + " " + fareCabin
                cabinName = totalFareDetails[j]['cabinName']
                miles = totalFareDetails[j]['totalAwardMiles']
                if ',' in miles:
                    miles = miles.replace(',', '')
                taxInt = totalFareDetails[j]['totalPriceLeft']
                if ',' in taxInt:
                    taxInt = taxInt.replace(',', '')
                taxFloat = totalFareDetails[j]['totalPriceRight']
                if taxFloat == '' or taxFloat == None:
                    taxFloat = 0
                tax = float(taxInt) + float(taxFloat)
                currencyCode = totalFareDetails[j]['currencyCode']
                if currencyCode and currencyCode != 'USD':
                    currencychange = urllib.urlopen(
                        "https://www.exchangerate-api.com/%s/%s/%f?k=e002a7b64cabe2535b57f764"
                        % (currencyCode, "USD", float(tax)))
                    taxes = currencychange.read()
                else:
                    taxes = tax
            if len(pricecol) > 1:

                if j == 0:
                    cabintype = "Economy"
                if j == 1 and 'First' not in pricecol[1].text:
                    cabintype = "Business"
                if j == 2 and len(
                        pricecol) > 2 and 'First' not in pricecol[2].text:
                    cabintype = "Business"
            else:
                if len(pricecol) > 0 and len(pricecol) < 2:
                    if 'Main Cabin' in pricecol[0].text:
                        cabintype = "Economy"
                    elif 'First' not in pricecol[0].text:
                        cabintype = 'Business'
            if 'Economy' in cabintype:
                ecofare = miles
                echoTax = taxes
                cabintype1 = "Economy"
                if len(fareCode) > 0:
                    eco_fare_code = ','.join(fareCode)
                    ecofareClass = ' Economy@'.join(fareCode) + ' Economy'
            elif 'Business' in cabintype:
                cabintype2 = "Business"
                bussfare = miles
                busstax = taxes
                if len(fareCode) > 0:
                    bus_fare_code = ','.join(fareCode)
                    bussFareClass = ' Business@'.join(fareCode) + ' Business'
            else:
                cabintype3 = "First"
                firstFare = miles
                firsttax = taxes
                if len(fareCode) > 0:
                    first_fare_code = ','.join(fareCode)
                    firstFareClass = ' First@'.join(fareCode) + ' First'
        departdetailtext = '@'.join(departDetail)
        ariveDetailtext = '@'.join(ariveDetail)
        flightDetailtext = '@'.join(flightDetail)
        operatorDetailtext = '@'.join(operatorDetail)
        stoppage = ''
        stop = int(len(departDetail) - 1)
        if stop == 0:
            stoppage = "NONSTOP"
        elif stop == 1:
            stoppage = "1 STOP"
        else:
            stoppage = str(stop) + " STOPS"
        arivalTime1 = (datetime.datetime.strptime(arivalTime, '%I:%M%p'))
        arivalTime = arivalTime1.strftime('%H:%M')
        departTime1 = (datetime.datetime.strptime(departTime, '%I:%M%p'))
        departTime = departTime1.strftime('%H:%M')

        if len(pricecol) > 1 and 'Delta Comfort+' in pricecol[1].text:
            bussFareClass = bussFareClass.replace('Business', 'Economy')
            values_string.append(
                (flightNo, str(searchkey), stime, stoppage, "test", SourceCOde,
                 destinationCode, departTime, arivalTime, tripDuration,
                 str(ecofare), str(echoTax), '0', '0', str(firstFare),
                 str(firsttax), cabintype1, cabintype2, cabintype3, "delta",
                 departdetailtext, ariveDetailtext, flightDetailtext,
                 operatorDetailtext, ecofareClass, '', firstFareClass,
                 eco_fare_code, '', first_fare_code))
            values_string.append(
                (flightNo, str(searchkey), stime, stoppage, "test", SourceCOde,
                 destinationCode, departTime, arivalTime, tripDuration,
                 str(bussfare), str(busstax), '0', '0', '0', '0', cabintype1,
                 cabintype2, cabintype3, "delta", departdetailtext,
                 ariveDetailtext, flightDetailtext, operatorDetailtext,
                 bussFareClass, '', '', bus_fare_code, '', ''))
        else:
            values_string.append(
                (flightNo, str(searchkey), stime, stoppage, "test", SourceCOde,
                 destinationCode, departTime, arivalTime, tripDuration,
                 str(ecofare), str(echoTax), str(bussfare), str(busstax),
                 str(firstFare), str(firsttax), cabintype1, cabintype2,
                 cabintype3, "delta", departdetailtext, ariveDetailtext,
                 flightDetailtext, operatorDetailtext, ecofareClass,
                 bussFareClass, firstFareClass, eco_fare_code, bus_fare_code,
                 first_fare_code))
        if len(values_string) > 50:
            if not DEV_LOCAL:
                cursor.executemany(
                    "INSERT INTO pexproject_flightdata (flighno,searchkeyid,scrapetime,stoppage,stoppage_station,origin,destination,departure,arival,duration,maincabin,maintax,firstclass,firsttax,business,businesstax,cabintype1,cabintype2,cabintype3,datasource,departdetails,arivedetails,planedetails,operatedby,economy_code,business_code,first_code,eco_fare_code,business_fare_code,first_fare_code) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);",
                    values_string)
                db.commit()
            else:
                print values_string
            values_string = []

    if len(values_string) > 0:
        if not DEV_LOCAL:
            cursor.executemany(
                "INSERT INTO pexproject_flightdata (flighno,searchkeyid,scrapetime,stoppage,stoppage_station,origin,destination,departure,arival,duration,maincabin,maintax,firstclass,firsttax,business,businesstax,cabintype1,cabintype2,cabintype3,datasource,departdetails,arivedetails,planedetails,operatedby,economy_code,business_code,first_code,eco_fare_code,business_fare_code,first_fare_code) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);",
                values_string)
            db.commit()
        else:
            print values_string

    storeFlag(searchkey, stime)
    return searchkey
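Like several other snippets in this collection, the scraper above rebinds sys.stdout with codecs.getwriter('utf-8')(sys.stdout), which assumes Python 2, where sys.stdout accepts bytes. A short illustrative sketch of the version split (not part of the scraper itself):

import codecs
import sys

if sys.version_info < (3, 0):
    # Python 2: stdout is a byte stream, so it can be wrapped directly.
    sys.stdout = codecs.getwriter("utf-8")(sys.stdout)
else:
    # Python 3: stdout is already text, so wrap the underlying buffer.
    sys.stdout = codecs.getwriter("utf-8")(sys.stdout.buffer)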
Example #48
0
def main():
    from subprocess import check_output, CalledProcessError
    parser = ArgumentParser(epilog=__doc__, formatter_class=RawDescriptionHelpFormatter)
    task = parser.add_mutually_exclusive_group()
    task.add_argument('--install', action='store_true',
                      help="""Install nbstripout in the current repository (set
                              up the git filter and attributes)""")
    task.add_argument('--uninstall', action='store_true',
                      help="""Uninstall nbstripout from the current repository
                              (remove the git filter and attributes)""")
    task.add_argument('--is-installed', action='store_true',
                      help='Check if nbstripout is installed in current repository')
    task.add_argument('--status', action='store_true',
                      help='Print status of nbstripout installation in current repository and configuration summary if installed')
    parser.add_argument('--keep-count', action='store_true',
                        help='Do not strip the execution count/prompt number')
    parser.add_argument('--keep-output', action='store_true',
                        help='Do not strip output')
    parser.add_argument('--attributes', metavar='FILEPATH', help="""Attributes
        file to add the filter to (in combination with --install/--uninstall),
        defaults to .git/info/attributes""")
    parser.add_argument('--global', dest='_global', action='store_true',
                        help='Use global git config (default is local config)')
    task.add_argument('--version', action='store_true',
                      help='Print version')
    parser.add_argument('--force', '-f', action='store_true',
                        help='Strip output also from files with non ipynb extension')

    parser.add_argument('--textconv', '-t', action='store_true',
                        help='Prints stripped files to STDOUT')

    parser.add_argument('files', nargs='*', help='Files to strip output from')
    args = parser.parse_args()

    git_config = ['git', 'config'] + (['--global'] if args._global else [])
    if args.install:
        sys.exit(install(git_config, attrfile=args.attributes))
    if args.uninstall:
        sys.exit(uninstall(git_config, attrfile=args.attributes))
    if args.is_installed:
        sys.exit(status(git_config, verbose=False))
    if args.status:
        sys.exit(status(git_config, verbose=True))
    if args.version:
        print(__version__)
        sys.exit(0)

    try:
        extra_keys = check_output(git_config + ['filter.nbstripout.extrakeys']).strip()
    except CalledProcessError:
        extra_keys = ''

    input_stream = None
    if sys.version_info < (3, 0):
        import codecs
        # Use UTF8 reader/writer for stdin/stdout
        # http://stackoverflow.com/a/1169209
        if sys.stdin:
            input_stream = codecs.getreader('utf8')(sys.stdin)
        output_stream = codecs.getwriter('utf8')(sys.stdout)
    else:
        # Wrap input/output stream in UTF-8 encoded text wrapper
        # https://stackoverflow.com/a/16549381
        if sys.stdin:
            input_stream = io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8')
        output_stream = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')

    for filename in args.files:
        if not (args.force or filename.endswith('.ipynb')):
            continue
        try:
            with io.open(filename, 'r', encoding='utf8') as f:
                nb = read(f, as_version=NO_CONVERT)
            nb = strip_output(nb, args.keep_output, args.keep_count, extra_keys)
            if args.textconv:
                write(nb, output_stream)
                output_stream.flush()
            else:
                with io.open(filename, 'w', encoding='utf8') as f:
                    write(nb, f)
        except NotJSONError:
            print("'{}' is not a valid notebook".format(filename), file=sys.stderr)
            sys.exit(1)
        except Exception:
            # Ignore exceptions for non-notebook files.
            print("Could not strip '{}'".format(filename), file=sys.stderr)
            raise

    if not args.files and input_stream:
        try:
            nb = strip_output(read(input_stream, as_version=NO_CONVERT),
                              args.keep_output, args.keep_count, extra_keys)
            write(nb, output_stream)
            output_stream.flush()
        except NotJSONError:
            print('No valid notebook detected', file=sys.stderr)
            sys.exit(1)
Example #49
0
import argparse
import codecs
import os
import re
import sys
import numpy as np

parser = argparse.ArgumentParser(description='')
parser.add_argument('inputf', type=str, metavar='', help='')

A = parser.parse_args()

if __name__ == '__main__':
    sys.stdout = codecs.getwriter('utf-8')(sys.stdout)
    sys.stderr = codecs.getwriter('utf-8')(sys.stderr)

    D = {}
    f = codecs.open(A.inputf, "r", "utf-8")
    for line in f:
        fields = line.strip().split()
        D[fields[0]] = np.array(map(float, fields[1:]))
    f.close()

    # for tag in D:
    #     print tag, D[tag]

    POS_DICT = {
        1: "NN",
        2: "NNP",
        3: "IN",
        4: "DT",
Example #50
0
from cam.sgnmt.predictors.grammar import RuleXtractPredictor
from cam.sgnmt.predictors.length import WordCountPredictor, NBLengthPredictor, \
    ExternalLengthPredictor, NgramCountPredictor
from cam.sgnmt.predictors.misc import IdxmapPredictor, UnboundedIdxmapPredictor, \
    UnboundedAltsrcPredictor, AltsrcPredictor, UnkvocabPredictor
from cam.sgnmt.predictors.misc import UnkCountPredictor
from cam.sgnmt.predictors.ngram import SRILMPredictor
from cam.sgnmt.predictors.tokenization import Word2charPredictor, FSTTokPredictor
from cam.sgnmt.tf.interface import tf_get_nmt_predictor, tf_get_nmt_vanilla_decoder, \
    tf_get_rnnlm_predictor, tf_get_default_nmt_config, tf_get_rnnlm_prefix
from cam.sgnmt.ui import get_args, get_parser, validate_args


# UTF-8 support
if sys.version_info < (3, 0):
    sys.stderr = codecs.getwriter('UTF-8')(sys.stderr)
    sys.stdout = codecs.getwriter('UTF-8')(sys.stdout)
    sys.stdin = codecs.getreader('UTF-8')(sys.stdin)

# Load configuration from command line arguments or configuration file
args = get_args()

# Set up logger
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s')
logging.getLogger().setLevel(logging.INFO)
if args.verbosity == 'debug':
    logging.getLogger().setLevel(logging.DEBUG)
elif args.verbosity == 'info':
    logging.getLogger().setLevel(logging.INFO)
elif args.verbosity == 'warn':
Example #51
0
# -*- coding: utf-8 -*-

# Author: Santosh Rai
# Version: 1.00 (20200520)
# upload files to GCP with gsutil

import platform
import os
import io
import sys
import codecs
import csv
import subprocess
import datetime as dt

sys.stdout = codecs.getwriter("utf-8")(sys.stdout)

if sys.version_info[0] == 3:
    # for Python3
    from tkinter import Frame, Label, Message, StringVar, Canvas, Button, Menu, DoubleVar
    import tkinter.filedialog as filedialogprompt
    from tkinter.ttk import Scrollbar
    from tkinter.constants import *
    import time
    from tkinter import ttk
    from tkinter import messagebox as message_box

else:
    # for Python2
    from Tkinter import Frame, Label, Message, StringVar, Canvas, Button, Menu, DoubleVar
    import tkFileDialog as filedialogprompt
Example #52
0
                                   "deprel." + args.lang,
                                   validate_langspec=True)
        # All relations available in DEPREL are also allowed in DEPS.
        # In addition, there might be relations that are only allowed in DEPS.
        # One of them, "ref", is universal and we currently list it directly in the code here, instead of creating a file "edeprel.ud".
        tagsets[DEPS] = tagsets[DEPREL] | {"ref"} | load_set(
            "deprel.ud", "edeprel." + args.lang, validate_enhanced=True)
        tagsets[FEATS] = load_set("feat_val.ud", "feat_val." + args.lang)
        tagsets[UPOS] = load_set("cpos.ud", None)
        tagsets[TOKENSWSPACE] = load_set("tokens_w_space.ud",
                                         "tokens_w_space." + args.lang)
        tagsets[TOKENSWSPACE] = [
            re.compile(regex, re.U) for regex in tagsets[TOKENSWSPACE]
        ]  #...turn into compiled regular expressions

    out = codecs.getwriter("utf-8")(
        sys.stdout)  # hard-coding - does this ever need to be anything else?

    try:
        known_sent_ids = set()
        open_files = []
        if args.input == []:
            args.input.append("-")
        for fname in args.input:
            if fname == "-":
                open_files.append(codecs.getreader("utf-8")(os.fdopen(0, "U")))
            else:
                inp_raw = open(fname, mode="U")
                open_files.append(codecs.getreader("utf-8")(inp_raw))
        for curr_fname, inp in zip(args.input, open_files):
            validate(inp, out, args, tagsets, known_sent_ids)
    except:
Example #53
0
# You should have received a copy of the GNU General Public License
# along with LilyPond.  If not, see <http://www.gnu.org/licenses/>.

import __main__
import codecs
import gettext
import glob
import optparse
import os
import re
import shutil
import sys
import time

sys.stdin = codecs.getreader('utf8')(sys.stdin.detach())
sys.stdout = codecs.getwriter('utf8')(sys.stdout.detach())
sys.stderr = codecs.getwriter('utf8')(sys.stderr.detach())

# Lilylib globals.
program_name = os.path.basename(sys.argv[0])

# Logging framework: We have the following output functions:
#    error
#    warning
#    progress
#    debug

loglevels = {
    "NONE": 0,
    "ERROR": 1,
    "WARN": 2,
Example #54
0
def main(prog_args=sys.argv[1:]):
    # in case we changed the location of the settings directory where the
    # config file lives, we need to parse this argument before we parse
    # the rest of the arguments (which can overwrite the options in the
    # config file)
    settings_parser = argparse.ArgumentParser(add_help=False)
    settings_parser.add_argument(
        '-S',
        '--settings',
        help='Path to settings, config and temp files directory '
        '[Default=~/.spotify-ripper]')
    args, remaining_argv = settings_parser.parse_known_args(prog_args)
    init_util_globals(args)

    # load config file, overwriting any defaults
    defaults = {
        "bitrate": "320",
        "quality": "320",
        "comp": "10",
        "vbr": "0",
        "partial_check": "weak",
    }
    defaults = load_config(defaults)

    spotipy_envs = [
        "SPOTIPY_CLIENT_ID", "SPOTIPY_CLIENT_SECRET", "SPOTIPY_REDIRECT_URI"
    ]

    for spotipy_env in spotipy_envs:
        if spotipy_env not in os.environ:
            value = defaults.get(spotipy_env.lower())
            if value:
                os.environ[spotipy_env] = value

    parser = argparse.ArgumentParser(
        prog='spotify-ripper',
        description='Rips Spotify URIs to MP3s with ID3 tags and album covers',
        parents=[settings_parser],
        formatter_class=argparse.RawTextHelpFormatter,
        epilog='''Example usage:
    rip a single file: spotify-ripper -u user spotify:track:52xaypL0Kjzk0ngwv3oBPR
    rip entire playlist: spotify-ripper -u user spotify:user:username:playlist:4vkGNcsS8lRXj4q945NIA4
    rip a list of URIs: spotify-ripper -u user list_of_uris.txt
    rip tracks from Spotify's charts: spotify-ripper -l spotify:charts:regional:global:weekly:latest
    search for tracks to rip: spotify-ripper -l -Q 160 -o "album:Rumours track:'the chain'"
    ''')

    # create group to prevent user from using both the -l and -u option
    is_user_set = defaults.get('user') is not None
    is_last_set = defaults.get('last') is True
    if is_user_set or is_last_set:
        if is_user_set and is_last_set:
            print("spotify-ripper: error: one of the arguments -u/--user "
                  "-l/--last is required")
            sys.exit(1)
        else:
            group = parser.add_mutually_exclusive_group(required=False)
    else:
        group = parser.add_mutually_exclusive_group(required=True)

    encoding_group = parser.add_mutually_exclusive_group(required=False)

    # set defaults
    parser.set_defaults(**defaults)

    prog_version = pkg_resources.require("spotify-ripper")[0].version
    parser.add_argument(
        '-a',
        '--ascii',
        action='store_true',
        help='Convert the file name and the metadata tags to ASCII '
        'encoding [Default=utf-8]')
    encoding_group.add_argument(
        '--aac',
        action='store_true',
        help='Rip songs to AAC format with FreeAAC instead of MP3')
    encoding_group.add_argument(
        '--aiff',
        action='store_true',
        help='Rip songs to lossless AIFF encoding instead of MP3')
    encoding_group.add_argument(
        '--alac',
        action='store_true',
        help='Rip songs to Apple Lossless format instead of MP3')
    parser.add_argument(
        '--all-artists',
        action='store_true',
        help='Store all artists, rather than just the main artist, in the '
        'track\'s metadata tag')
    parser.add_argument(
        '--artist-album-type',
        help='Only load albums of specified types when passing a Spotify '
        'artist URI [Default=album,single,ep,compilation,appears_on]')
    parser.add_argument(
        '--artist-album-market',
        help='Only load albums with the specified ISO2 country code when '
        'passing a Spotify artist URI. You may get duplicate albums '
        'if not set. [Default=any]')
    parser.add_argument(
        '-A',
        '--ascii-path-only',
        action='store_true',
        help='Convert the file name (but not the metadata tags) to ASCII '
        'encoding [Default=utf-8]')
    parser.add_argument('-b', '--bitrate', help='CBR bitrate [Default=320]')
    parser.add_argument('-c',
                        '--cbr',
                        action='store_true',
                        help='CBR encoding [Default=VBR]')
    parser.add_argument(
        '--comp',
        help='compression complexity for FLAC and Opus [Default=Max]')
    parser.add_argument(
        '--comment',
        help='Set comment metadata tag to all songs. Can include '
        'same tags as --format.')
    parser.add_argument(
        '--cover-file',
        help='Save album cover image to file name (e.g "cover.jpg") '
        '[Default=embed]')
    parser.add_argument(
        '--cover-file-and-embed',
        metavar="COVER_FILE",
        help='Same as --cover-file but embeds the cover image too')
    parser.add_argument(
        '-d',
        '--directory',
        help='Base directory where ripped MP3s are saved [Default=cwd]')
    parser.add_argument('--fail-log',
                        help="Logs the list of track URIs that failed to rip")
    encoding_group.add_argument(
        '--flac',
        action='store_true',
        help='Rip songs to lossless FLAC encoding instead of MP3')
    parser.add_argument(
        '-f',
        '--format',
        help='Save songs using this path and filename structure (see README)')
    parser.add_argument(
        '--format-case',
        choices=['upper', 'lower', 'capitalize'],
        help='Convert all words of the file name to upper-case, lower-case, '
        'or capitalized')
    parser.add_argument('--flat',
                        action='store_true',
                        help='Save all songs to a single directory '
                        '(overrides --format option)')
    parser.add_argument(
        '--flat-with-index',
        action='store_true',
        help='Similar to --flat [-f] but includes the playlist index at '
        'the start of the song file')
    parser.add_argument(
        '-g',
        '--genres',
        choices=['artist', 'album'],
        help='Attempt to retrieve genre information from Spotify\'s '
        'Web API [Default=skip]')
    parser.add_argument(
        '--grouping',
        help='Set grouping metadata tag to all songs. Can include '
        'same tags as --format.')
    encoding_group.add_argument(
        '--id3-v23',
        action='store_true',
        help='Store ID3 tags using version v2.3 [Default=v2.4]')
    parser.add_argument('-k',
                        '--key',
                        help='Path to Spotify application key file '
                        '[Default=Settings Directory]')
    group.add_argument('-u', '--user', help='Spotify username')
    parser.add_argument('-p',
                        '--password',
                        help='Spotify password [Default=ask interactively]')
    parser.add_argument(
        '--large-cover-art',
        action='store_true',
        help='Attempt to retrieve 640x640 cover art from Spotify\'s Web API '
        '[Default=300x300]')
    group.add_argument('-l',
                       '--last',
                       action='store_true',
                       help='Use last login credentials')
    parser.add_argument(
        '-L',
        '--log',
        help='Log in a log-friendly format to a file (use - to log to stdout)')
    encoding_group.add_argument(
        '--pcm',
        action='store_true',
        help='Saves a .pcm file with the raw PCM data instead of MP3')
    encoding_group.add_argument(
        '--mp4',
        action='store_true',
        help='Rip songs to MP4/M4A format with Fraunhofer FDK AAC codec '
        'instead of MP3')
    parser.add_argument('--normalize',
                        action='store_true',
                        help='Normalize volume levels of tracks')
    parser.add_argument('-na',
                        '--normalized-ascii',
                        action='store_true',
                        help='Convert the file name to normalized ASCII with '
                        'unicodedata.normalize (NFKD)')
    parser.add_argument('-o',
                        '--overwrite',
                        action='store_true',
                        help='Overwrite existing MP3 files [Default=skip]')
    encoding_group.add_argument(
        '--opus',
        action='store_true',
        help='Rip songs to Opus encoding instead of MP3')
    parser.add_argument(
        '--partial-check',
        choices=['none', 'weak', 'strict'],
        help='Check for and overwrite partially ripped files. "weak" will '
        'err on the side of not re-ripping the file if it is unsure, '
        'whereas "strict" will re-rip the file [Default=weak]')
    parser.add_argument(
        '--play-token-resume',
        metavar="RESUME_AFTER",
        help='If the \'play token\' is lost to a different device using '
        'the same Spotify account, the script will wait a specified '
        'amount of time before restarting. This argument takes the same '
        'values as --resume-after [Default=abort]')
    parser.add_argument('--playlist-m3u',
                        action='store_true',
                        help='create a m3u file when ripping a playlist')
    parser.add_argument('--playlist-wpl',
                        action='store_true',
                        help='create a wpl file when ripping a playlist')
    parser.add_argument(
        '--playlist-sync',
        action='store_true',
        help='Sync playlist songs (rename and remove old songs)')
    parser.add_argument(
        '--plus-pcm',
        action='store_true',
        help='Saves a .pcm file in addition to the encoded file (e.g. mp3)')
    parser.add_argument(
        '--plus-wav',
        action='store_true',
        help='Saves a .wav file in addition to the encoded file (e.g. mp3)')
    parser.add_argument(
        '-q',
        '--vbr',
        help='VBR quality setting or target bitrate for Opus [Default=0]')
    parser.add_argument('-Q',
                        '--quality',
                        choices=['160', '320', '96'],
                        help='Spotify stream bitrate preference [Default=320]')
    parser.add_argument(
        '--remove-offline-cache',
        action='store_true',
        help='Remove libspotify\'s offline cache directory after the rip '
        'is complete to save disk space')
    parser.add_argument(
        '--resume-after',
        help='Resumes script after a certain amount of time has passed '
        'after stopping (e.g. 1h30m). Alternatively, accepts a specific '
        'time in 24hr format to start after (e.g 03:30, 16:15). '
        'Requires --stop-after option to be set')
    parser.add_argument(
        '-R',
        '--replace',
        nargs="+",
        required=False,
        help='pattern to replace the output filename separated by "/". '
        'The following example replaces all spaces with "_" and all "-" '
        'with ".":    spotify-ripper --replace " /_" "\-/." uri')
    parser.add_argument('-s',
                        '--strip-colors',
                        action='store_true',
                        help='Strip coloring from output [Default=colors]')
    parser.add_argument(
        '--stereo-mode',
        choices=['j', 's', 'f', 'd', 'm', 'l', 'r'],
        help='Advanced stereo settings for Lame MP3 encoder only')
    parser.add_argument(
        '--stop-after',
        help='Stops script after a certain amount of time has passed '
        '(e.g. 1h30m). Alternatively, accepts a specific time in 24hr '
        'format to stop after (e.g 03:30, 16:15)')
    parser.add_argument(
        '--timeout',
        type=int,
        help=
        'Override the PySpotify timeout value in seconds (Default=10 seconds)')
    parser.add_argument('-V',
                        '--version',
                        action='version',
                        version=prog_version)
    encoding_group.add_argument(
        '--wav',
        action='store_true',
        help='Rip songs to uncompressed WAV file instead of MP3')
    parser.add_argument('--windows-safe',
                        action='store_true',
                        help='Make filename safe for Windows file system '
                        '(truncate filename to 255 characters)')
    encoding_group.add_argument(
        '--vorbis',
        action='store_true',
        help='Rip songs to Ogg Vorbis encoding instead of MP3')
    parser.add_argument(
        '-r',
        '--remove-from-playlist',
        action='store_true',
        help='[WARNING: SPOTIFY IS NOT PROPAGATING PLAYLIST CHANGES TO '
        'THEIR SERVERS] Delete tracks from playlist after successful '
        'ripping [Default=no]')
    parser.add_argument(
        'uri',
        nargs="+",
        help='One or more Spotify URI(s) (either URI, a file of URIs or a '
        'search query)')
    args = parser.parse_args(remaining_argv)
    init_util_globals(args)

    # kind of a hack to get colorama stripping to work when outputting
    # to a file instead of stdout.  Taken from initialise.py in colorama
    def wrap_stream(stream, convert, strip, autoreset, wrap):
        if wrap:
            wrapper = AnsiToWin32(stream,
                                  convert=convert,
                                  strip=strip,
                                  autoreset=autoreset)
            if wrapper.should_wrap():
                stream = wrapper.stream
        return stream

    args.has_log = args.log is not None
    if args.has_log:
        if args.log == "-":
            init(strip=True)
        else:
            encoding = "ascii" if args.ascii else "utf-8"
            log_file = codecs.open(enc_str(args.log), 'a', encoding)
            sys.stdout = wrap_stream(log_file, None, True, False, True)
    else:
        init(strip=True if args.strip_colors else None)

    if args.ascii_path_only is True:
        args.ascii = True

    # unless explicitly told not to, we are going to encode
    # for utf-8 by default
    if not args.ascii and sys.version_info < (3, 0):
        sys.stdout = codecs.getwriter('utf-8')(sys.stdout)

    # small sanity check on user option
    if args.user is not None and args.user == "USER":
        print(Fore.RED + "Please pass your username as --user " +
              "<YOUR_USER_NAME> instead of --user USER " +
              "<YOUR_USER_NAME>..." + Fore.RESET)
        sys.exit(1)

    # give warning for broken feature
    if args.remove_from_playlist:
        print(Fore.RED + "--REMOVE-FROM-PLAYLIST WARNING:")
        print("SPOTIFY HAS BROKEN libspotify")
        print("THE PLAYLIST WILL BE EMPTIED AT THE END USING THE WEB API")
        print(
            "CHECK THE GITHUB FOR INSTRUCTIONS ON AUTHENTICATING THE WEB API" +
            Fore.RESET)

    if args.wav:
        args.output_type = "wav"
    elif args.pcm:
        args.output_type = "pcm"
    elif args.flac:
        args.output_type = "flac"
        if args.comp == "10":
            args.comp = "8"
    elif args.vorbis:
        args.output_type = "ogg"
        if args.vbr == "0":
            args.vbr = "9"
    elif args.opus:
        args.output_type = "opus"
        if args.vbr == "0":
            args.vbr = "320"
    elif args.aac:
        args.output_type = "aac"
        if args.vbr == "0":
            args.vbr = "500"
    elif args.mp4:
        args.output_type = "m4a"
        if args.vbr == "0":
            args.vbr = "5"
    elif args.alac:
        args.output_type = "alac.m4a"
    else:
        args.output_type = "mp3"

    # check that encoder tool is available
    encoders = {
        "flac": ("flac", "flac"),
        "aiff": ("sox", "sox"),
        "aac": ("faac", "faac"),
        "ogg": ("oggenc", "vorbis-tools"),
        "opus": ("opusenc", "opus-tools"),
        "mp3": ("lame", "lame"),
        "m4a": ("fdkaac", "fdk-aac-encoder"),
        "alac.m4a": ("avconv", "libav-tools"),
    }
    if args.output_type in encoders.keys():
        encoder = encoders[args.output_type][0]
        if which(encoder) is None:
            print(Fore.RED + "Missing dependency '" + encoder +
                  "'.  Please install and add to path..." + Fore.RESET)
            # assumes OS X or Ubuntu/Debian
            command_help = ("brew install " if sys.platform == "darwin" else
                            "sudo apt-get install ")
            print("...try " + Fore.YELLOW + command_help +
                  encoders[args.output_type][1] + Fore.RESET)
            sys.exit(1)

    # format string
    if args.flat:
        args.format = "{artist} - {track_name}.{ext}"
    elif args.flat_with_index:
        args.format = "{idx:3} - {artist} - {track_name}.{ext}"
    elif args.format is None:
        args.format = "{album_artist}/{album}/{artist} - {track_name}.{ext}"

    # print some settings
    print(Fore.GREEN + "Spotify Ripper - v" + prog_version + Fore.RESET)

    def encoding_output_str():
        if args.output_type == "wav":
            return "WAV, Stereo 16bit 44100Hz"
        elif args.output_type == "pcm":
            return "Raw Headerless PCM, Stereo 16bit 44100Hz"
        else:
            if args.output_type == "flac":
                return "FLAC, Compression Level: " + args.comp
            elif args.output_type == "aiff":
                return "AIFF"
            elif args.output_type == "alac.m4a":
                return "Apple Lossless (ALAC)"
            elif args.output_type == "ogg":
                codec = "Ogg Vorbis"
            elif args.output_type == "opus":
                codec = "Opus"
            elif args.output_type == "mp3":
                codec = "MP3"
            elif args.output_type == "m4a":
                codec = "MPEG4 AAC"
            elif args.output_type == "aac":
                codec = "AAC"
            else:
                codec = "Unknown"

            if args.cbr:
                return codec + ", CBR " + args.bitrate + " kbps"
            else:
                return codec + ", VBR " + args.vbr

    print(Fore.YELLOW + "  Encoding output:\t" + Fore.RESET +
          encoding_output_str())
    print(Fore.YELLOW + "  Spotify bitrate:\t" + Fore.RESET + args.quality +
          " kbps")

    def unicode_support_str():
        if args.ascii_path_only:
            return "Unicode tags, ASCII file path"
        elif args.ascii:
            return "ASCII only"
        else:
            return "Yes"

    # check that --stop-after and --resume-after options are valid
    if args.stop_after is not None and \
            parse_time_str(args.stop_after) is None:
        print(Fore.RED + "--stop-after option is not valid" + Fore.RESET)
        sys.exit(1)
    if args.resume_after is not None and \
            parse_time_str(args.resume_after) is None:
        print(Fore.RED + "--resume-after option is not valid" + Fore.RESET)
        sys.exit(1)
    if args.play_token_resume is not None and \
            parse_time_str(args.play_token_resume) is None:
        print(Fore.RED + "--play_token_resume option is not valid" +
              Fore.RESET)
        sys.exit(1)

    print(Fore.YELLOW + "  Unicode support:\t" + Fore.RESET +
          unicode_support_str())
    print(Fore.YELLOW + "  Output directory:\t" + Fore.RESET + base_dir())
    print(Fore.YELLOW + "  Settings directory:\t" + Fore.RESET +
          settings_dir())

    print(Fore.YELLOW + "  Format String:\t" + Fore.RESET + args.format)
    print(Fore.YELLOW + "  Overwrite files:\t" + Fore.RESET +
          ("Yes" if args.overwrite else "No"))

    # patch a bug when Python 3/MP4
    if sys.version_info >= (3, 0) and \
            (args.output_type == "m4a" or args.output_type == "alac.m4a"):
        patch_bug_in_mutagen()

    ripper = Ripper(args)
    ripper.start()

    # try to listen for terminal resize events
    # (needs to be called on main thread)
    if not args.has_log:
        ripper.progress.handle_resize()
        signal.signal(signal.SIGWINCH, ripper.progress.handle_resize)

    def hasStdinData():
        return select.select([sys.stdin], [], [], 0) == ([sys.stdin], [], [])

    def abort(set_logged_in=False):
        ripper.abort_rip()
        if set_logged_in:
            ripper.ripper_continue.set()
        ripper.join()
        sys.exit(1)

    def skip():
        if ripper.ripping.is_set():
            ripper.skip.set()

    # check if we were passed a file name or search
    def check_uri_args():
        if len(args.uri) == 1 and path_exists(args.uri[0]):
            encoding = "ascii" if args.ascii else "utf-8"
            args.uri = [
                line.strip()
                for line in codecs.open(enc_str(args.uri[0]), 'r', encoding)
                if not line.strip().startswith("#") and len(line.strip()) > 0
            ]
        elif len(args.uri) == 1 and not args.uri[0].startswith("spotify:"):
            args.uri = [list(ripper.search_query(args.uri[0]))]

    # login and uri_parse on main thread to catch any KeyboardInterrupt
    try:
        if not ripper.login():
            print(Fore.RED + "Encountered issue while logging into "
                  "Spotify, aborting..." + Fore.RESET)
            abort(set_logged_in=True)
        else:
            check_uri_args()
            ripper.ripper_continue.set()

    except (KeyboardInterrupt, Exception) as e:
        if not isinstance(e, KeyboardInterrupt):
            print(str(e))
        print("\n" + Fore.RED + "Aborting..." + Fore.RESET)
        abort(set_logged_in=True)

    # wait for ripping thread to finish
    if not args.has_log:
        try:
            stdin_settings = termios.tcgetattr(sys.stdin)
        except termios.error:
            stdin_settings = None
    try:
        if not args.has_log and stdin_settings:
            tty.setcbreak(sys.stdin.fileno())

        while ripper.is_alive():
            schedule.run_pending()

            # check if the escape button was pressed
            if not args.has_log and hasStdinData():
                c = sys.stdin.read(1)
                if c == '\x1b':
                    skip()
            ripper.join(0.1)
    except (KeyboardInterrupt, Exception) as e:
        if not isinstance(e, KeyboardInterrupt):
            print(str(e))
        print("\n" + Fore.RED + "Aborting..." + Fore.RESET)
        abort()
    finally:
        if not args.has_log and stdin_settings:
            termios.tcsetattr(sys.stdin, termios.TCSADRAIN, stdin_settings)
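
The end of Example #54 isolates a useful pattern: the rip loop polls stdin in cbreak mode so a single ESC keypress skips the current track without blocking the worker thread. Below is a minimal standalone sketch of that pattern, assuming a POSIX terminal; the function names and the 0.1 s poll interval are illustrative, not spotify-ripper's API.

# Minimal sketch (POSIX only): put the terminal into cbreak mode, poll stdin
# with select(), and restore the original terminal settings on exit.
import select
import sys
import termios
import time
import tty


def has_stdin_data():
    # True when at least one byte can be read from stdin without blocking
    return select.select([sys.stdin], [], [], 0) == ([sys.stdin], [], [])


def wait_for_escape(poll_interval=0.1):
    old_settings = termios.tcgetattr(sys.stdin)
    try:
        tty.setcbreak(sys.stdin.fileno())
        while True:
            if has_stdin_data() and sys.stdin.read(1) == '\x1b':  # ESC pressed
                return
            time.sleep(poll_interval)
    finally:
        termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_settings)


if __name__ == '__main__':
    print('Press ESC to continue...')
    wait_for_escape()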
Example #55
0
@lazy_property
def cardnames() -> List[str]:
    return fetcher.catalog_cardnames()

@lazy_property
def pd_legal_cards() -> List[str]:
    print('Fetching http://pdmtgo.com/legal_cards.txt')
    return requests.get('http://pdmtgo.com/legal_cards.txt').text.split('\n')

ALL_BUGS: List[Dict] = []

VERIFICATION_BY_ISSUE: Dict[int, str] = {}

if sys.stdout.encoding != 'utf-8':
    sys.stdout = codecs.getwriter('utf-8')(sys.stdout.buffer) # type: ignore

def main() -> None:
    if configuration.get('github_user') is None or configuration.get('github_password') is None:
        print('Invalid Config')
        sys.exit(1)

    verification_numbers()

    issues = repo.get_repo().get_issues()
    for issue in issues:
        print(issue.title)
        if issue.state == 'open':
            process_issue(issue)

    txt = open('bannable.txt', mode='w')
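
The module-level guard in Example #55 rewraps sys.stdout so issue titles containing non-ASCII card names print without UnicodeEncodeError when the console encoding is not UTF-8. Here is a minimal Python 3 sketch of the same guard; the sample string is illustrative, and on Python 3.7+ sys.stdout.reconfigure(encoding='utf-8') is a simpler alternative to codecs.

import codecs
import sys

# Re-wrap stdout only when its encoding is not already some spelling of UTF-8.
if (sys.stdout.encoding or '').replace('-', '').lower() != 'utf8':
    sys.stdout = codecs.getwriter('utf-8')(sys.stdout.buffer)

print(u'Jötun Grunt')  # illustrative non-ASCII output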
Example #56
0
def save_jsonl_gz(data, filename):
    with gzip.GzipFile(filename, 'wb') as out_file:
        writer = codecs.getwriter('utf-8')(out_file)
        for element in data:
            writer.write(json.dumps(element))
            writer.write('\n')
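
Example #56 wraps the binary gzip handle once with codecs.getwriter('utf-8') so each JSON record can be written as text. A hypothetical round-trip check using the mirror-image codecs.getreader follows; read_jsonl_gz and the sample records are assumptions for illustration, and the snippet relies on save_jsonl_gz as defined above.

import codecs
import gzip
import json


def read_jsonl_gz(filename):
    # decode UTF-8 lines back out of the gzip stream
    with gzip.GzipFile(filename, 'rb') as in_file:
        reader = codecs.getreader('utf-8')(in_file)
        return [json.loads(line) for line in reader if line.strip()]


save_jsonl_gz([{'id': 1}, {'id': 2}], 'records.jsonl.gz')
print(read_jsonl_gz('records.jsonl.gz'))  # -> [{'id': 1}, {'id': 2}]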
Example #57
0
def main():
    usage = ("usage: pyinstrument [options] scriptfile [arg] ...")
    version_string = 'pyinstrument {v}, on Python {pyv[0]}.{pyv[1]}.{pyv[2]}'.format(
        v=pyinstrument.__version__,
        pyv=sys.version_info,
    )
    parser = optparse.OptionParser(usage=usage, version=version_string)
    parser.allow_interspersed_args = False

    def dash_m_callback(option, opt, value, parser):
        parser.values.module_name = value
        # everything after the -m argument should be passed to that module
        parser.values.module_args = parser.rargs + parser.largs
        parser.rargs[:] = []
        parser.largs[:] = []

    parser.add_option('', '--load-prev',
        dest='load_prev', action='store', metavar='ID',
        help="instead of running a script, load a previous report")

    parser.add_option('-m', '',
        dest='module_name', action='callback', callback=dash_m_callback,
        type="str",
        help="run library module as a script, like 'python -m module'")
    parser.add_option('', '--from-path',
        dest='from_path', action='store_true',
        help="(POSIX only) instead of the working directory, look for scriptfile in the PATH environment variable")

    parser.add_option('-o', '--outfile',
        dest="outfile", action='store',
        help="save to <outfile>", default=None)

    parser.add_option('-r', '--renderer',
        dest='renderer', action='store', type='string',
        help=("how the report should be rendered. One of: 'text', 'html', 'json', or python "
              "import path to a renderer class"),
        default='text')

    parser.add_option('', '--html',
        dest="output_html", action='store_true',
        help=optparse.SUPPRESS_HELP, default=False)  # deprecated shortcut for --renderer=html

    parser.add_option('-t', '--timeline',
        dest='timeline', action='store_true',
        help="render as a timeline - preserve ordering and don't condense repeated calls")

    parser.add_option('', '--hide',
        dest='hide_fnmatch', action='store', metavar='EXPR',
        help=("glob-style pattern matching the file paths whose frames to hide. Defaults to "
              "hiding non-application code"))
    parser.add_option('', '--hide-regex',
        dest='hide_regex', action='store', metavar='REGEX',
        help=("regex matching the file paths whose frames to hide. Useful if --hide doesn't give "
              "enough control."))

    parser.add_option('', '--show',
        dest='show_fnmatch', action='store', metavar='EXPR',
        help=("glob-style pattern matching the file paths whose frames to "
              "show, regardless of --hide or --hide-regex. For example, use "
              "--show '*/<library>/*' to show frames within a library that "
              "would otherwise be hidden."))
    parser.add_option('', '--show-regex',
        dest='show_regex', action='store', metavar='REGEX',
        help=("regex matching the file paths whose frames to always show. "
              "Useful if --show doesn't give enough control."))
    parser.add_option('', '--show-all',
        dest='show_all', action='store_true',
        help="show everything", default=False)

    parser.add_option('', '--unicode',
        dest='unicode', action='store_true',
        help='(text renderer only) force unicode text output')
    parser.add_option('', '--no-unicode',
        dest='unicode', action='store_false',
        help='(text renderer only) force ascii text output')

    parser.add_option('', '--color',
        dest='color', action='store_true',
        help='(text renderer only) force ansi color text output')
    parser.add_option('', '--no-color',
        dest='color', action='store_false',
        help='(text renderer only) force no color text output')

    if not sys.argv[1:]:
        parser.print_help()
        sys.exit(2)

    options, args = parser.parse_args()

    if args == [] and options.module_name is None and options.load_prev is None:
        parser.print_help()
        sys.exit(2)

    if options.module_name is not None and options.from_path:
        parser.error("The options -m and --from-path are mutually exclusive.")

    if options.from_path and sys.platform == 'win32':
        parser.error('--from-path is not supported on Windows')

    if options.hide_fnmatch is not None and options.hide_regex is not None:
        parser.error("You can't specify both --hide and --hide-regex")

    if options.hide_fnmatch is not None:
        options.hide_regex = fnmatch.translate(options.hide_fnmatch)

    show_options_used = [options.show_fnmatch is not None, options.show_regex is not None, options.show_all]
    if show_options_used.count(True) > 1:
        parser.error('You can only specify one of --show, --show-regex and --show-all')

    if options.show_fnmatch is not None:
        options.show_regex = fnmatch.translate(options.show_fnmatch)
    if options.show_all:
        options.show_regex = r'.*'

    if options.load_prev:
        session = load_report(options.load_prev)
    else:
        if options.module_name is not None:
            if not (sys.path[0] and os.path.samefile(sys.path[0], '.')):
                # when called with '-m', search the cwd for that module
                sys.path[0] = os.path.abspath('.')

            sys.argv[:] = [options.module_name] + options.module_args
            code = "run_module(modname, run_name='__main__', alter_sys=True)"
            globs = {
                'run_module': runpy.run_module,
                'modname': options.module_name
            }
        else:
            sys.argv[:] = args
            if options.from_path:
                progname = shutil.which(args[0])
                if progname is None:
                    sys.exit('Error: program {} not found in PATH!'.format(args[0]))
            else:
                progname = args[0]
                if not os.path.exists(progname):
                    sys.exit('Error: program {} not found!'.format(args[0]))

            # Make sure we overwrite the first entry of sys.path ('.') with directory of the program.
            sys.path[0] = os.path.dirname(progname)

            code = "run_path(progname, run_name='__main__')"
            globs = {
                'run_path': runpy.run_path,
                'progname': progname
            }

        profiler = Profiler()

        profiler.start()

        try:
            exec_(code, globs, None)
        except (SystemExit, KeyboardInterrupt):
            pass

        profiler.stop()
        session = profiler.last_session

    if options.output_html:
        options.renderer = 'html'

    output_to_temp_file = (options.renderer == 'html'
                           and not options.outfile
                           and file_is_a_tty(sys.stdout))

    if options.outfile:
        f = codecs.open(options.outfile, 'w', 'utf-8')
        should_close_f_after_writing = True
    elif not output_to_temp_file:
        if PY2:
            f = codecs.getwriter('utf-8')(sys.stdout)
        else:
            f = sys.stdout
        should_close_f_after_writing = False

    renderer_kwargs = {'processor_options': {
        'hide_regex': options.hide_regex,
        'show_regex': options.show_regex,
    }}

    if options.timeline is not None:
        renderer_kwargs['timeline'] = options.timeline

    if options.renderer == 'text':
        unicode_override = options.unicode is not None
        color_override = options.color is not None
        unicode = options.unicode if unicode_override else file_supports_unicode(f)
        color = options.color if color_override else file_supports_color(f)

        renderer_kwargs.update({'unicode': unicode, 'color': color})

    renderer_class = get_renderer_class(options.renderer)
    renderer = renderer_class(**renderer_kwargs)

    # remove this frame from the trace
    renderer.processors.append(remove_first_pyinstrument_frame_processor)


    if output_to_temp_file:
        output_filename = renderer.open_in_browser(session)
        print('stdout is a terminal, so saved profile output to %s' % output_filename)
    else:
        f.write(renderer.render(session))
        if should_close_f_after_writing:
            f.close()

    if options.renderer == 'text':
        _, report_identifier = save_report(session)
        print('To view this report with different options, run:')
        print('    pyinstrument --load-prev %s [options]' % report_identifier)
        print('')
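
The codecs-related part of Example #57 is the output selection near the end: the rendered report goes either to a UTF-8 file opened with codecs.open or, on Python 2, to a getwriter-wrapped stdout. A condensed sketch of just that fallback, using hypothetical names rather than pyinstrument's actual API:

import codecs
import sys

PY2 = sys.version_info[0] == 2


def open_report_stream(outfile=None):
    """Return (stream, should_close): a UTF-8 file when outfile is given,
    otherwise stdout, wrapped with codecs on Python 2 so unicode is safe."""
    if outfile:
        return codecs.open(outfile, 'w', 'utf-8'), True
    if PY2:
        return codecs.getwriter('utf-8')(sys.stdout), False
    return sys.stdout, False


stream, should_close = open_report_stream()
stream.write(u'profile report rendered\n')
if should_close:
    stream.close()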
Example #58
0
    def extract_data(self):

        # create tmp_path folder if necessary
        if not os.path.exists(
                os.path.join(self.tmp_path, self.collection_name, 'data')):
            os.makedirs(
                os.path.join(self.tmp_path, self.collection_name, 'data'))

        if not os.path.exists(
                os.path.join(self.tmp_path, self.collection_name, 'rejected')):
            os.makedirs(
                os.path.join(self.tmp_path, self.collection_name, 'rejected'))

        # delete old tmp files if exists
        for old_file in glob.glob(
                os.path.join(self.tmp_path, self.collection_name, 'data',
                             '*')):
            print "Deleting old file %s" % (old_file)
            os.remove(old_file)

        for old_file in glob.glob(
                os.path.join(self.tmp_path, self.collection_name, 'rejected',
                             '*')):
            print "Deleting old file %s" % (old_file)
            os.remove(old_file)

        # some state variables
        part_num = 0
        extract_file = None

        reject_part_num = 0
        reject_file = None

        # start mongo client
        db = self.mongo_client[self.db_name]
        collection = db[self.collection_name]

        # turn query string into json
        if self.extract_query is not None:
            if 'ObjectId' in self.extract_query:
                # kinda hacky.. and dangerous! This is to evaluate an expression
                # like {"_id": {$gt:ObjectId("55401a60151a4b1a4f000001")}}
                from bson.objectid import ObjectId
                extract_query_json = eval(self.extract_query)
            else:
                extract_query_json = json.loads(self.extract_query)
        else:
            extract_query_json = None

        # query collection, sort by collection_sort_by_field
        for data in collection.find(extract_query_json).sort(
                self.collection_sort_by_field, 1):

            # track min and max id for auditing..
            if self.sort_by_field_min is None:
                self.sort_by_field_min = data[self.collection_sort_by_field]
            self.sort_by_field_max = data[self.collection_sort_by_field]

            # open a new file if necessary
            if self.num_records_extracted % NUM_RECORDS_PER_PART == 0:

                if extract_file is not None:
                    extract_file.close()

                part_num += 1
                extract_file_name = os.path.join(self.tmp_path,
                                                 self.collection_name, 'data',
                                                 str(part_num))
                extract_file = open(extract_file_name, "w")
                extract_file_codec = codecs.getwriter("utf-8")(extract_file)
                self.extract_file_names.append(extract_file_name)
                print "Creating file %s" % extract_file_name

            # validate policies
            rejected = False
            for required_field_name, policy in self.required_fields.iteritems(
            ):
                if policy['required'] and jsonpath_get(
                        data, required_field_name) is None:

                    # --------------------------------------------------------
                    # document found that doesn't contain required fields.
                    # --------------------------------------------------------

                    # open a new file if necessary
                    if self.num_records_rejected % NUM_RECORDS_PER_PART == 0:

                        if reject_file is not None:
                            reject_file.close()

                        reject_part_num += 1
                        reject_file_name = os.path.join(
                            self.tmp_path, self.collection_name, 'rejected',
                            str(reject_part_num))
                        reject_file = open(reject_file_name, "w")
                        reject_file_codec = codecs.getwriter("utf-8")(
                            reject_file)
                        self.reject_file_names.append(reject_file_name)
                        print "Creating reject file %s" % reject_file_name

                    self.num_records_rejected += 1
                    reject_file_codec.write("Rejected. Missing %s. Data: %s" %
                                            (required_field_name, dumps(data)))
                    reject_file_codec.write('\n')

                    rejected = True
                    break

            if not rejected:
                self.num_records_extracted += 1
                extract_file_codec.write(dumps(data))
                extract_file_codec.write('\n')

        if extract_file is not None:
            extract_file.close()

        if reject_file is not None:
            reject_file.close()
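
Example #58 opens each part file and wraps it with codecs.getwriter('utf-8') so the dumped documents may contain non-ASCII text, rolling over to a new part every NUM_RECORDS_PER_PART records. Below is a stripped-down sketch of that rotation pattern; the constant value, directory name, and record source are illustrative, and the files are opened in binary mode so the same code runs on Python 2 and 3.

import codecs
import json
import os

NUM_RECORDS_PER_PART = 3  # small value for illustration


def write_parts(records, out_dir):
    """Write records as JSON lines, one UTF-8 part file per
    NUM_RECORDS_PER_PART records (files are named 1, 2, 3, ...)."""
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    part_num, part_file, writer = 0, None, None
    for i, record in enumerate(records):
        if i % NUM_RECORDS_PER_PART == 0:
            if part_file is not None:
                part_file.close()
            part_num += 1
            # open in binary mode and let codecs handle the UTF-8 encoding
            part_file = open(os.path.join(out_dir, str(part_num)), 'wb')
            writer = codecs.getwriter('utf-8')(part_file)
        writer.write(json.dumps(record) + u'\n')
    if part_file is not None:
        part_file.close()


write_parts([{'n': i} for i in range(7)], 'data_parts')  # -> parts 1, 2, 3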
Example #59
0
# list of all the archive items that exist
itemlist = 'what_cd-itemlist-2014-04-29.txt'

# output file
opfile = 'flaclist2.txt'

# but write it zipped
import gzip

itemcount = 0
flaccount = 0
# Read the metadata of each item to create a list of artist album title length dir name
with open(itemlist, 'r') as fp:
    # write directly to a gzip file, use codecs to translate to utf-8 on output
    with codecs.getwriter('utf-8')(gzip.open(opfile + '.gz', 'wb')) as opfp:
        # header
        opfp.write(('\t'.join(
            ['#artist', 'album', 'title', 'length', 'dir', 'name'])) + '\n')
        # body
        for item in fp:
            # Ignore any errors coming out of ia
            iacmd = ['ia', 'metadata', item]
            try:
                metadata = json.loads(subprocess.check_output(iacmd))
            except Exception:
                print "*** error running/parsing " + " ".join(iacmd)
                metadata = dict()  # fake an empty result
            #print "Archive",item,"contains",metadata['files_count'],"files"
            if 'files' in metadata:
                for fileinfo in metadata['files']:
Example #60
0
    EXIT
    HELP
    LOOKUP
    REMOVE PARAM[ETER]S
    SHOW PARAM[ETER]S
    VERSION

If a numeric value is entered, details of the node with that ID are displayed.
Any other statements are executed as Cypher.
"""

if not PY3:
    _stdin = sys.stdin
    preferred_encoding = locale.getpreferredencoding()
    sys.stdin = codecs.getreader(preferred_encoding)(sys.stdin)
    sys.stdout = codecs.getwriter(preferred_encoding)(sys.stdout)
    sys.stderr = codecs.getwriter(preferred_encoding)(sys.stderr)


class ResultWriter(object):
    def __init__(self, out=None):
        self.out = out or sys.stdout

    @classmethod
    def _stringify(cls, value, quoted=False):
        if value is None:
            if quoted:
                return "null"
            else:
                return ""
        elif isinstance(value, list):