Beispiel #1
1
 def _get_user_identity(self):
     """Build the ``Name <email>`` identity (as bytes) for new commits.

     Precedence per field: GIT_COMMITTER_* environment variables, then
     the git config stack, then values derived from the local account
     and hostname.
     """
     identity = {
         "name": os.environ.get("GIT_COMMITTER_NAME"),
         "email": os.environ.get("GIT_COMMITTER_EMAIL"),
     }
     config = self.get_config_stack()
     # Fill whatever the environment did not provide from git config.
     for key in ("name", "email"):
         if identity[key] is None:
             try:
                 identity[key] = config.get(("user", ), key)
             except KeyError:
                 pass
     if identity["name"] is None:
         import getpass
         identity["name"] = getpass.getuser().encode(sys.getdefaultencoding())
     if identity["email"] is None:
         import getpass
         import socket
         # Fall back to user@host, encoded like the name above.
         identity["email"] = ("{}@{}".format(getpass.getuser(),
                                             socket.gethostname())
                              .encode(sys.getdefaultencoding()))
     return identity["name"] + b" <" + identity["email"] + b">"
Beispiel #2
0
def create_pdf(manuscript, journal, authors, publisher,
               target_agreement):
    """Render the agreement PDF for *target_agreement* and return its bytes.

    manuscript/journal/publisher are byte strings decoded with the
    interpreter default encoding; authors is an iterable of byte strings.
    Calls show_error() when target_agreement has no registered handler.
    """
    # Get a temporary file name for the handler to write into.
    # NOTE(review): taking .name from an unreferenced NamedTemporaryFile is
    # racy (the underlying file is deleted when the object is collected);
    # kept as-is since the handlers expect a path.
    pdf_fn = tempfile.NamedTemporaryFile().name

    # generate the appropriate PDF
    if target_agreement in agreements.handlers:
        agreements.handlers[target_agreement]() (
            pdf_fn,
            manuscript=manuscript.decode(sys.getdefaultencoding(),'ignore'),
            journal=journal.decode(sys.getdefaultencoding(),'ignore'),
            author=[
                a.decode(sys.getdefaultencoding(),'ignore') for a in authors],
            publisher=publisher.decode(sys.getdefaultencoding(),'ignore'))
    else:
        # invalid target
        show_error()

    # Read the temporary file back.  ``with open(...)`` replaces the
    # Python-2-only ``file()`` builtin and guarantees the handle is closed.
    with open(pdf_fn, "rb") as pdf_file:
        result = pdf_file.read()

    # Remove the temp file.  pdf_fn is already an absolute path, so the old
    # os.path.join(tempfile.tempdir, pdf_fn) was redundant (and fragile when
    # tempfile.tempdir is None).
    os.unlink(pdf_fn)

    # return the contents
    return result
Beispiel #3
0
    def test_astype_unicode(self):
        """astype('unicode') must equal mapping compat.text_type over the data."""

        # GH7758
        # a bit of magic is required to set default encoding encoding to utf-8
        digits = string.digits
        test_series = [
            Series([digits * 10,
                    tm.rands(63),
                    tm.rands(64),
                    tm.rands(1000)]),
            Series([u('データーサイエンス、お前はもう死んでいる')]),
        ]

        former_encoding = None
        if not compat.PY3:
            # in python we can force the default encoding for this test
            former_encoding = sys.getdefaultencoding()
            reload(sys)  # noqa
            sys.setdefaultencoding("utf-8")
        if sys.getdefaultencoding() == "utf-8":
            # Only add a raw-bytes entry when utf-8 is active so it decodes.
            test_series.append(Series([u('野菜食べないとやばい').encode("utf-8")]))
        for s in test_series:
            res = s.astype("unicode")
            expec = s.map(compat.text_type)
            assert_series_equal(res, expec)
        # restore the former encoding
        if former_encoding is not None and former_encoding != "utf-8":
            reload(sys)  # noqa
            sys.setdefaultencoding(former_encoding)
Beispiel #4
0
def ReportFatalException(e, stacktrace_on_error=False):
    """
    Formats various exceptions; raises SystemExit, never returns.
    """
    # NOTE(review): Python-2 code -- relies on ``sys.exc_type`` and
    # ``e.message``, both removed in Python 3.
    if isinstance(e, XsltException) or \
        isinstance(e, RuntimeException) or \
        isinstance(e, CompiletimeException):
        if stacktrace_on_error:
            traceback.print_exc(1000, sys.stderr)
        # Chars that can't be encoded become numeric character references
        # ("&#NNN;"): encode(..., 'ignore') yields an empty (falsy) string
        # for them, so the ``or`` picks the reference instead.
        raise SystemExit(''.join([c.encode(sys.getdefaultencoding(), 'ignore')
                                  or "&#%d;" % ord(c) for c in e.message]))
    else:
        if stacktrace_on_error:
            traceback.print_exc(1000, sys.stderr)
            msg = ''
        else:
            exceptiontype = str(sys.exc_type)
            # Strip the "exceptions." module prefix for readability.
            if exceptiontype.startswith('exceptions.'):
                exceptiontype = exceptiontype[11:]
            msg = 'An unexpected error occurred while processing.\n' + \
                  'The error was: %s: %s\n' % (exceptiontype, str(e)) + \
                  'Use the -e (--stacktrace-on-error) option for a full stack trace.'
        if msg:
            raise SystemExit(''.join([c.encode(sys.getdefaultencoding(), 'ignore')
                                      or "&#%d;" % ord(c) for c in msg]))
        else:
            sys.exit()
    def test_unicode(self):
        """Title can contain unicode characters."""
        # Python-2 test: the child script uses print statements and
        # ``filter`` below is relied on to return a list.
        if 'utf-8' != sys.getdefaultencoding():
            raise SkipTest("encoding '%s' can't deal with snowmen"
                    % sys.getdefaultencoding())

        rv = self.run_script(r"""
            snowman = u'\u2603'

            import setproctitle
            setproctitle.setproctitle("Hello, " + snowman + "!")

            import os
            print os.getpid()
            print os.popen("ps -o pid,command 2> /dev/null").read()
        """)
        # First output line is the child's pid; the remainder is `ps`
        # output mapping pid -> command line.
        lines = filter(None, rv.splitlines())
        pid = lines.pop(0)
        pids = dict([r.strip().split(None, 1) for r in lines])

        # Accept the various ways different `ps` implementations render
        # the snowman glyph.
        snowmen = [
            u'\u2603',          # ps supports unicode
            r'\M-b\M^X\M^C',    # ps output on BSD
            r'M-bM^XM^C',       # ps output on OS-X
        ]
        title = self._clean_up_title(pids[pid])
        for snowman in snowmen:
            if title == "Hello, " + snowman + "!":
                break
        else:
            self.fail("unexpected ps output: %r" % title)
def bug_1771_with_user_config(var):
    """Pylint regression fixture: not every branch returns, on purpose."""
    # sys.getdefaultencoding is considered as a never
    # returning function in the inconsistent_returns.rc file.
    if var == 1:
        sys.getdefaultencoding()
    else:
        return var * 2
Beispiel #7
0
def get_git_revision(repopath):
    """
    Return Git revision for the repository located at repopath

    Result is a tuple (latest commit hash, branch), with None values on
    error
    """
    try:
        git = programs.find_program('git')
        assert git is not None and osp.isdir(osp.join(repopath, '.git'))

        # Latest commit (short hash).
        commit_output = programs.run_program(
            git, ['rev-parse', '--short', 'HEAD'], cwd=repopath).communicate()
        commit = commit_output[0].strip()
        if PY3:
            commit = commit.decode(sys.getdefaultencoding())

        # Branch: the active one is the single `git branch` line that
        # starts with '*'.
        branch_output = programs.run_program(
            git, ['branch'], cwd=repopath).communicate()
        raw_branches = branch_output[0]
        if PY3:
            raw_branches = raw_branches.decode(sys.getdefaultencoding())
        starred = [line for line in raw_branches.split('\n')
                   if line.startswith('*')]
        branch = starred[0].split(None, 1)[1] if len(starred) == 1 else None

        return commit, branch
    except (subprocess.CalledProcessError, AssertionError, AttributeError):
        return None, None
Beispiel #8
0
def exec_process(cmdline, silent=True, catch_enoent=True, input=None, **kwargs):
    """Execute a subprocess and returns the returncode, stdout buffer and stderr buffer.
    Optionally prints stdout and stderr while running.

    :param cmdline: argv list for the child process
    :param silent: when False, echo the child's stdout/stderr to ours
    :param catch_enoent: translate a missing executable into DistutilsError
    :param input: data passed to the child's stdin
    :raises DistutilsError: missing executable (with catch_enoent) or
        nonzero exit status
    :returns: the child's stdout, decoded to text on Python 3
    """
    try:
        sub = subprocess.Popen(args=cmdline, stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                               **kwargs)
        stdout, stderr = sub.communicate(input=input)

        # Decode on Python 3; do nothing on Python 2, where communicate()
        # already returns str.  isinstance replaces the old
        # ``type(stdout) != type("")`` comparison (same behavior, idiomatic,
        # and robust against str subclasses).
        if not isinstance(stdout, str):
            stdout = stdout.decode(sys.getdefaultencoding(), "replace")
            stderr = stderr.decode(sys.getdefaultencoding(), "replace")

        returncode = sub.returncode
        if not silent:
            sys.stdout.write(stdout)
            sys.stderr.write(stderr)
    except OSError as e:
        if e.errno == errno.ENOENT and catch_enoent:
            raise DistutilsError('"%s" is not present on this system' % cmdline[0])
        else:
            raise
    if returncode != 0:
        raise DistutilsError('Got return value %d while executing "%s", stderr output was:\n%s' % (returncode, " ".join(cmdline), stderr.rstrip("\n")))
    return stdout
Beispiel #9
0
    def test_utf8_html_error_template(self):
        """test the html_error_template with a Template containing utf8
        chars"""
        # NOTE(review): Python-2 syntax (``except ..., ce:``); this will not
        # parse on Python 3.
        if util.py3k:
            code = """# -*- coding: utf-8 -*-
% if 2 == 2: /an error
${'привет'}
% endif
"""
        else:
            code = """# -*- coding: utf-8 -*-
% if 2 == 2: /an error
${u'привет'}
% endif
"""
        try:
            template = Template(code)
            template.render_unicode()
        except exceptions.CompileException, ce:
            html_error = exceptions.html_error_template().render()
            assert ("CompileException: Fragment 'if 2 == 2: /an "
                    "error' is not a partial control "
                    "statement at line: 2 char: 1") in \
                    html_error.decode('utf-8')

            # The offending source line must appear in the rendered page,
            # entity-escaped for the interpreter default encoding.
            if util.py3k:
                assert u"3 ${'привет'}".encode(sys.getdefaultencoding(),
                                            'htmlentityreplace') in html_error
            else:
                assert u"3 ${u'привет'}".encode(sys.getdefaultencoding(),
                                            'htmlentityreplace') in html_error
Beispiel #10
0
def detect_console_encoding():
    """
    Try to find the most capable encoding supported by the console.
    slightly modified from the way IPython handles the same issue.
    """
    global _initial_defencoding

    def _is_unusable(enc):
        # Missing or plain-ascii encodings are not good enough.
        return not enc or 'ascii' in enc.lower()

    # First choice: whatever the standard streams report.
    try:
        encoding = sys.stdout.encoding or sys.stdin.encoding
    except AttributeError:
        encoding = None

    # Second choice: the locale's preferred encoding.
    if _is_unusable(encoding):
        try:
            encoding = locale.getpreferredencoding()
        except Exception:
            pass

    # Last resort: the interpreter default (usually "ascii" on py2).
    if _is_unusable(encoding):
        encoding = sys.getdefaultencoding()

    # GH3360, save the reported defencoding at import time
    # MPL backends may change it. Make available for debugging.
    if not _initial_defencoding:
        _initial_defencoding = sys.getdefaultencoding()

    return encoding
def build_multi_mime_message(content_type_pairs, file_type_pairs):
    """Assemble a multipart MIME message from inline content and files.

    Argument:
    content_type_pairs - A list of tuples [(content, mime-type, filename)]
    file_type_pairs -- A list of strings formatted as file-path : mime-type

    Returns the message as a string, or "" when file_type_pairs is empty.
    """
    if len(file_type_pairs) == 0:
        return ""

    combined_message = MIMEMultipart()
    for i in file_type_pairs:
        (filename, format_type) = i.split(":", 1)
        filename = filename.strip()
        format_type = format_type.strip()
        with open(filename) as filehandle:
            contents = filehandle.read()
        sub_message = MIMEText(contents, format_type, sys.getdefaultencoding())
        sub_message.add_header('Content-Disposition', 'attachment;'
                               ' filename="%s"' % (filename))
        combined_message.attach(sub_message)
    for i in content_type_pairs:
        sub_message = MIMEText(i[0], i[1].strip(), sys.getdefaultencoding())
        # Use the supplied filename only for well-formed 3-tuples; the old
        # ``len(i) <= 3`` test crashed with IndexError on shorter tuples.
        if len(i) == 3:
            sub_message.add_header('Content-Disposition',
                                   'attachment; filename="%s"' %
                                   (i[2].strip()))
        else:
            sub_message.add_header('Content-Disposition', 'attachment;'
                                   ' filename="%s"' % ("cs-cloud-init.yaml"))
        combined_message.attach(sub_message)

    return str(combined_message)
def encodeASCII(string, language=None): #from Unicodize and plex scanner and other sources
  """Best-effort conversion of a possibly multi-encoded byte string to ASCII.

  Python-2 code (relies on ``str.decode`` and byte-wise iteration).  The
  first byte selects a source codec, the remainder is decoded, NFKD
  normalized, control characters are stripped, and any remaining multi-byte
  characters are mapped through CHARACTERS_MAP; unmapped non-Asian
  characters are logged.
  """
  if string=="": return ""
  ranges = [ {"from": ord(u"\u3300"), "to": ord(u"\u33ff")}, {"from": ord(u"\ufe30"), "to": ord(u"\ufe4f")}, {"from": ord(u"\uf900"), "to": ord(u"\ufaff")},  # compatibility ideographs
             {"from": ord(u"\u30a0"), "to": ord(u"\u30ff")}, {"from": ord(u"\u2e80"), "to": ord(u"\u2eff")},                                                  # Japanese Kana    # cjk radicals supplement
             {"from": ord(u"\u4e00"), "to": ord(u"\u9fff")}, {"from": ord(u"\u3400"), "to": ord(u"\u4dbf")}]                                                  # windows: TypeError: ord() expected a character, but string of length 2 found #{"from": ord(u"\U00020000"), "to": ord(u"\U0002a6df")}, #{"from": ord(u"\U0002a700"), "to": ord(u"\U0002b73f")}, #{"from": ord(u"\U0002b740"), "to": ord(u"\U0002b81f")}, #{"from": ord(u"\U0002b820"), "to": ord(u"\U0002ceaf")}, # included as of Unicode 8.0                             #{"from": ord(u"\U0002F800"), "to": ord(u"\U0002fa1f")}  # compatibility ideographs
  encodings, encoding = ['iso8859-1', 'utf-16', 'utf-16be', 'utf-8'], ord(string[0])                                                                          #
  if 0 <= encoding < len(encodings):  string = string[1:].decode('cp949') if encoding == 0 and language == 'ko' else string[1:].decode(encodings[encoding])   # If we're dealing with a particular language, we might want to try another code page.
  if sys.getdefaultencoding() not in encodings:
    try:     string = string.decode(sys.getdefaultencoding())
    except:  pass
  if not sys.getfilesystemencoding()==sys.getdefaultencoding():
    try:     string = string.decode(sys.getfilesystemencoding())
    except:  pass
  string = string.strip('\0')
  try:       string = unicodedata.normalize('NFKD', string)    # Unicode  to ascii conversion to corect most characters automatically
  except:    pass
  try:       string = re.sub(RE_UNICODE_CONTROL, '', string)   # Strip control characters.
  except:    pass
  try:       string = string.encode('ascii', 'replace')        # Encode into Ascii
  except:    pass
  original_string, string, i = string, list(string), 0
  while i < len(string):                                       ### loop through unicode and replace special chars with spaces then map if found ###
    if ord(string[i])<128:  i = i+1
    else: #non ascii char
      # Collect the full multi-byte sequence into one slot, blanking the
      # following positions, then try to map it to an ASCII replacement.
      char, char2, char3, char_len = 0, "", [], unicodeLen(string[i])
      for x in range(0, char_len):
        char = 256*char + ord(string[i+x]); char2 += string[i+x]; char3.append(string[i+x])
        if not x==0: string[i] += string[i+x]; string[i+x]=''
      try:    asian_language = any([mapping["from"] <= ord("".join(char3).decode('utf8')) <= mapping["to"] for mapping in ranges])
      except: asian_language = False
      if char in CHARACTERS_MAP:  string[i]=CHARACTERS_MAP.get( char )
      elif not asian_language:    Log("*Character missing in CHARACTERS_MAP: %d:'%s'  , #'%s' %s, string: '%s'" % (char, char2, char2, char3, original_string))
      i += char_len
  return ''.join(string)
def new_dataset(model):
    """Initializes the dataset."""
    # Python-2 code (print statements).  Walks model.data_path and registers
    # every non-thumbnail .jpg as an unlabelled Image row; the containing
    # directory name doubles as the class label (one Class row per dir).
    # print model.data_path
    dataset_id = model.id
    dataset_path = model.data_path
    print dataset_path
    for root, dirs, files in os.walk(dataset_path):
        # `first` flags whether this directory still needs its Class row.
        first = 1
        print files

        for image in files:
            print image
            print sys.getdefaultencoding()
            image = image.encode("utf8")
            if image.find(".jpg") > 0:
                print image
                if image.find("thumb") > 0:
                    continue
                classname = root.split('/')[-1]
                print classname
                if first == 1:
                    new = Class(model.id,classname)
                    db.session.add(new)
                    first = 0
                this_class = Class.query.filter(and_(Class.class_name == classname,Class.dataset_id == dataset_id)).first()
                print dataset_id
                print image

                img = Image(dataset_id,this_class.id,os.path.join(root,image),"unlabelled")
                db.session.add(img)
            db.session.commit()
Beispiel #14
0
def upload2(pyew, doprint=True):
    """ upload file to virscan.org"""
    # Python-2 code (print statements, urllib2).  Posts the file currently
    # loaded in pyew as a multipart form to the VirSCAN upload endpoint.
    print sys.getdefaultencoding()
    register_openers()
    url = r'http://up.virscan.org/up.php'


    # Multipart body: the file plus the hidden form fields the site expects.
    datagen, headers = multipart_encode({"upfile": open(pyew.filename, "rb"),
        'UPLOAD_IDENTIFIER' : 'KEYbc7cf6642fc84bf67747f1bbdce597f0',
        'langkey' : '1',
        'setcookie' : '0',
        'tempvar' : '',
        'fpath' : 'C:\\fakepath\\'+pyew.filename
        })


    # Mimic a browser request so the upload is accepted.
    request = urllib2.Request(url, datagen, headers)
    request.add_header('Host', 'up.virscan.org')
    request.add_header('Cache-Control', 'max-age=0')
    request.add_header('User-Agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.104 Safari/537.36')
    request.add_header('Host', 'up.virscan.org')
    request.add_header('Accept-Encoding', 'gzip, deflate')
    request.add_header('Accept-Language', 'zh-CN,zh;q=0.8,en;q=0.6')

    resp = urllib2.urlopen(request).read()
    # The failure page embeds "... file upload" inside an innerHTML snippet.
    if re.findall("innerHTML='(.*?) file upload", resp):
        print "Upload File Failed"
    else:
        print "Upload Success"
Beispiel #15
0
def main():
    """Parse command-line options, start the Tornado HTTP server on
    options.port, and print/log network and encoding diagnostics."""
    args = sys.argv
    cur_sys = system()
    file_name = GLOBALS["db_name"]+"-"+str(options.port)+".log"
    # if cur_sys == "Darwin":
    #     f = "/Users/"+os.getlogin()+"/Desktop/"+ file_name
    # elif cur_sys == "Linux":
    #     f = os.getcwd() + "/" + file_name
    # else:
    #     raise NotImplementedError
    # args.append("--log_file_prefix=" + f)
    # logging.basicConfig(filename=f, level=logging.DEBUG)

    tornado.options.parse_command_line()
    applicaton = Application()
    http_server = tornado.httpserver.HTTPServer(applicaton, xheaders=True)

    http_server.listen(options.port)
    print("="*50)
    print("initializing program with port : ", options.port)
    print("="*50)
    print("my ip is : ", socket.gethostbyname(socket.gethostname()))
    print("="*50)
    print("File system DEFAULT Encoding-type is : ", sys.getdefaultencoding())
    print("File system Encoding-type is : ", sys.getfilesystemencoding())
    print("="*50)
    logging.info("File system DEFAULT Encoding-type is : " + str(sys.getdefaultencoding()))
    logging.info("File system Encoding-type is : " +  str(sys.getfilesystemencoding()))

    # Blocks forever serving requests.
    ioloop = tornado.ioloop.IOLoop.instance()
    ioloop.start()
Beispiel #16
0
 def test_read_write(self, tmpdir):
     """Round-trip text and then bytes through write()/read()."""
     target = tmpdir.join("hello")
     text = py.builtin._totext("hällo", "utf8")
     # Text in, text out.
     target.write(text)
     assert target.read() == text
     # Bytes in (default-encoded), bytes out.
     encoded = text.encode(sys.getdefaultencoding())
     target.write(encoded)
     assert target.read() == encoded
Beispiel #17
0
 def test_startLogging(self):
     """
     startLogging() installs FileLogObserver and overrides sys.stdout and
     sys.stderr.
     """
     origStdout, origStderr = sys.stdout, sys.stderr
     self._startLoggingCleanup()
     # When done with test, reset stdout and stderr to current values:
     fakeFile = StringIO()
     observer = log.startLogging(fakeFile)
     self.addCleanup(observer.stop)
     log.msg("Hello!")
     self.assertIn("Hello!", fakeFile.getvalue())
     self.assertIsInstance(sys.stdout, LoggingFile)
     self.assertEqual(sys.stdout.level, NewLogLevel.info)
     # The replacement streams must inherit the original streams' encoding,
     # falling back to the interpreter default when none is reported.
     encoding = getattr(origStdout, "encoding", None)
     if not encoding:
         encoding = sys.getdefaultencoding()
     self.assertEqual(sys.stdout.encoding.upper(), encoding.upper())
     self.assertIsInstance(sys.stderr, LoggingFile)
     self.assertEqual(sys.stderr.level, NewLogLevel.error)
     encoding = getattr(origStderr, "encoding", None)
     if not encoding:
         encoding = sys.getdefaultencoding()
     self.assertEqual(sys.stderr.encoding.upper(), encoding.upper())
Beispiel #18
0
def view_locale(request):
    """Debug view: report locale and encoding settings as an HTML snippet."""
    loc_info = "getlocale: " + str(locale.getlocale()) + \
               "<br/>getdefaultlocale(): " + str(locale.getdefaultlocale()) + \
               "<br/>fs_encoding: " + str(sys.getfilesystemencoding()) + \
               "<br/>sys default encoding: " + str(sys.getdefaultencoding())
    # The original repeated the last fragment as a stand-alone expression
    # statement; it was evaluated and discarded (dead code), so it is gone.
    return HttpResponse(loc_info)
def get_build_message(message, outputs_url, logs_urls=None,
                      applied_reviews=None,
                      failed_command=None):
    """Retrieves build message."""
    # NOTE(review): Python-2 code (urllib2).  The return types are
    # inconsistent: the early return below yields str while the final
    # return yields encoded bytes -- confirm which one callers expect.
    if logs_urls is None:
        logs_urls = []
    if applied_reviews is None:
        applied_reviews = []
    build_msg = "%s\n\n" % message
    if applied_reviews:
        build_msg += "Reviews applied: `%s`\n\n" % applied_reviews
    if failed_command:
        build_msg += "Failed command: `%s`\n\n" % failed_command
    build_msg += ("All the build artifacts available"
                  " at: %s\n\n" % (outputs_url))
    logs_msg = ''
    for url in logs_urls:
        response = urllib2.urlopen(url)
        log_content = response.read().decode(sys.getdefaultencoding())
        if log_content == '':
            continue
        file_name = url.split('/')[-1]
        logs_msg += "- [%s](%s):\n\n" % (file_name, url)
        logs_msg += "```\n"
        # Only the tail of each log is kept, to bound the message size.
        log_tail = log_content.split("\n")[-LOG_TAIL_LIMIT:]
        logs_msg += "\n".join(log_tail)
        logs_msg += "```\n\n"
    if logs_msg == '':
        return build_msg
    build_msg += "Relevant logs:\n\n%s" % (logs_msg)
    return build_msg.encode(sys.getdefaultencoding())
Beispiel #20
0
	def emit(self, record):
		"""Forward a logging record to the BigWorld log, encoded as bytes."""
		category = record.name.encode(sys.getdefaultencoding())
		msg = record.getMessage()
		if record.exc_info is not None:
			# Append the formatted traceback to the message body.
			msg += "\n" + "".join(traceback.format_exception(*record.exc_info))
		msg = msg.encode(sys.getdefaultencoding())
		BWLogging.logLevelToBigWorldFunction[record.levelno](category, msg, None)
Beispiel #21
0
    def execute_shell(cmd, input='', timeout=None):
        """
        :param cmd:
        :param input: sent to stdin
        :return: returncode, stdout, stderr.
        :raise: subprocess.TimeoutExpired (I kill the process!)
        """
        stdin_pipe = subprocess.PIPE if input != '' else None
        stdin_data = input if input != '' else None

        # Start the child in its own session when a timeout is set so the
        # whole process group can be killed on expiry.
        # http://stackoverflow.com/a/4791612/801203
        child = subprocess.Popen(cmd,
                                 stdin=stdin_pipe,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 shell=True,
                                 preexec_fn=os.setsid if timeout else None)
        try:
            if stdin_data:
                out, err = child.communicate(
                    bytes(stdin_data, encoding=sys.getdefaultencoding()),
                    timeout=timeout)
            else:
                out, err = child.communicate(timeout=timeout)
        except subprocess.TimeoutExpired:
            logging.debug('Timeout expired, killing the process')
            os.killpg(os.getpgid(child.pid), signal.SIGTERM)
            child.communicate()
            raise

        default_enc = sys.getdefaultencoding()
        return (child.returncode,
                str(out, default_enc),
                str(err, default_enc))
Beispiel #22
0
 def test_startLogging(self):
     """
     startLogging() installs FileLogObserver and overrides sys.stdout and
     sys.stderr.
     """
     origStdout, origStderr = sys.stdout, sys.stderr
     self._startLoggingCleanup()
     # When done with test, reset stdout and stderr to current values:
     fakeFile = StringIO()
     observer = log.startLogging(fakeFile)
     self.addCleanup(observer.stop)
     log.msg("Hello!")
     self.assertIn("Hello!", fakeFile.getvalue())
     self.assertIsInstance(sys.stdout, log.StdioOnnaStick)
     self.assertEqual(sys.stdout.isError, False)
     # The replacement streams must inherit the original streams' encoding,
     # falling back to the interpreter default when none is reported.
     encoding = getattr(origStdout, "encoding", None)
     if not encoding:
         encoding = sys.getdefaultencoding()
     self.assertEqual(sys.stdout.encoding, encoding)
     self.assertIsInstance(sys.stderr, log.StdioOnnaStick)
     self.assertEqual(sys.stderr.isError, True)
     encoding = getattr(origStderr, "encoding", None)
     if not encoding:
         encoding = sys.getdefaultencoding()
     self.assertEqual(sys.stderr.encoding, encoding)
Beispiel #23
0
    def send_email(self, to_, from_=None, subject=None, body=None,
                   subtype="plain", charset="utf-8"):
        """Build a MIMEText message and hand it to self._send().

        NOTE(review): the ``codecs.decode(bytearray(s, defaultencoding),
        charset)`` pattern re-interprets the string's default-encoded bytes
        as *charset*; when the two encodings differ this can mangle
        non-ASCII text -- confirm this is intended.
        """
        message = MIMEText(body, subtype, charset)

        if subject:
            subject_header = Header()
            subject = (codecs.decode(bytearray(subject, sys.getdefaultencoding()), charset)
                       if isinstance(subject, str) else subject)
            subject_header.append(subject.strip())
            message["Subject"] = subject_header

        from_ = from_ or self.default_sender
        from_ = (codecs.decode(bytearray(from_, sys.getdefaultencoding()), charset)
                 if isinstance(from_, str) else from_)
        from_realname, from_addr = parseaddr(from_)
        from_header = Header()
        from_header.append(formataddr((from_realname, from_addr)))
        message['From'] = from_header

        to_ = (codecs.decode(bytearray(to_, sys.getdefaultencoding()), charset)
               if isinstance(to_, str) else to_)
        to_realname, to_addr = parseaddr(to_)
        to_header = Header()
        to_header.append(formataddr((to_realname, to_addr)))
        message['To'] = to_header

        self._send(message, from_addr, to_addr)
Beispiel #24
0
    def send_html_email(self, to_, from_=None, subject=None, text=None,
                        html=None, charset="utf-8"):
        """Build a multipart/alternative message (plain + HTML bodies) and
        hand it to self._send().

        NOTE(review): the ``codecs.decode(bytearray(s, defaultencoding),
        charset)`` pattern can mangle non-ASCII text when the two encodings
        differ -- confirm this is intended.
        """
        message = MIMEMultipart("alternative")

        if subject:
            subject_header = Header()
            subject = (codecs.decode(bytearray(subject, sys.getdefaultencoding()), charset)
                       if isinstance(subject, str) else subject)
            subject_header.append(subject.strip())
            message["Subject"] = subject_header

        from_ = from_ or self.default_sender
        from_ = (codecs.decode(bytearray(from_, sys.getdefaultencoding()), charset)
                 if isinstance(from_, str) else from_)
        from_realname, from_addr = parseaddr(from_)
        from_header = Header()
        from_header.append(formataddr((from_realname, from_addr)))
        message['From'] = from_header

        to_ = (codecs.decode(bytearray(to_, sys.getdefaultencoding()), charset)
               if isinstance(to_, str) else to_)
        to_realname, to_addr = parseaddr(to_)
        to_header = Header()
        to_header.append(formataddr((to_realname, to_addr)))
        message['To'] = to_header

        # Attach plain text first, HTML second: clients prefer the last
        # alternative they can render.
        message.attach(MIMEText(text, "plain", charset))
        message.attach(MIMEText(html, "html", charset))

        self._send(message, from_addr, to_addr)
Beispiel #25
0
	def unzipDataFile(self, dataFileName, unzipFileName):
		"""Expand a packed crawl file (length/url/length/zlib-html records)
		into the same record layout with the HTML uncompressed.

		NOTE(review): the loop has no EOF condition; it runs until a read
		fails, and only IOError is caught below (struct.error is not).
		"""
		print sys.getdefaultencoding()
		file_object_Z = open(dataFileName, "rb")
		file_object_W = open(unzipFileName, "wb")
		try:
			nDictCnt = 0
			while True:
				# Record: length prefix (struct "L") + URL bytes ...
				urlLen = struct.unpack("L",file_object_Z.read(4))
				srcUrl = file_object_Z.read(urlLen[0])

				# ... then length prefix + zlib-compressed HTML.
				htmlLen = struct.unpack("L",file_object_Z.read(4))
				zipHtml = file_object_Z.read(htmlLen[0])
				srcHtml = zlib.decompress(zipHtml)

				htmlLen = struct.pack("L",len(srcHtml))

				urlLen = struct.pack("L",urlLen[0])
				file_object_W.write(urlLen)
				file_object_W.write(srcUrl)

				file_object_W.write(htmlLen)
				file_object_W.write(srcHtml)
				#print srcHtml
				#break

		except IOError, e:
			print "IOError:", e
Beispiel #26
0
    def body(self):
        r'''
        Extract the plain text body of the message with signatures
        stripped off.

        The result is cached on self._body after the first call.

        :returns: the extracted text body
        :rtype:   :class:`str`
        '''
        if self._body is None:
            chunks = []
            for part in self.mail.walk():
                if part.get_content_type() != 'text/plain':
                    continue
                payload = part.get_payload(decode=True)
                charset = part.get_content_charset()
                if charset:
                    try:
                        text = payload.decode(charset, 'replace')
                    except LookupError:
                        # Unknown charset name: fall back to the default.
                        text = payload.decode(sys.getdefaultencoding(),
                                              'replace')
                else:
                    text = payload.decode(sys.getdefaultencoding(), 'replace')

                stripped = self._strip_signatures(text.split('\n'))
                chunks.append('\n'.join(stripped))
            self._body = '\n'.join(chunks)

        return self._body
Beispiel #27
0
def runCommand(cmd, instream = None, msg = '', upon_succ = None, show_stderr = False, return_zero = True):
    """Run *cmd* in a subprocess and return (stdout, stderr), both stripped.

    instream (str) is fed to the child's stdin.  With return_zero set,
    raises ValueError on a nonzero exit status or a signal death; raises
    OSError when the command cannot be executed at all.  On success,
    optionally calls upon_succ[0](*upon_succ[1:]).
    """
    if isinstance(cmd, str):
        cmd = shlex.split(cmd)
    # Run with the project's augmented PATH.
    popen_env = os.environ.copy()
    popen_env.update(env.path)
    try:
        tc = subprocess.Popen(cmd, stdin = subprocess.PIPE,
                              stdout = subprocess.PIPE, stderr = subprocess.PIPE,
                              env=popen_env)
        if instream:
            instream = instream.encode(sys.getdefaultencoding())
            out, error = tc.communicate(instream)
        else:
            out, error = tc.communicate()
        # Pipes yield bytes; normalize both streams to text.
        out = out.decode(sys.getdefaultencoding())
        error = error.decode(sys.getdefaultencoding())
        if return_zero:
            if tc.returncode < 0:
                raise ValueError ("Command '{0}' was terminated by signal {1}".format(cmd, -tc.returncode))
            elif tc.returncode > 0:
                raise ValueError ("{0}".format(error))
        if error.strip() and show_stderr:
            env.logger.error(error)
    except OSError as e:
        raise OSError ("Execution of command '{0}' failed: {1}".format(cmd, e))
    # everything is OK
    if upon_succ:
        # call the function (upon_succ) using others as parameters.
        upon_succ[0](*(upon_succ[1:]))
    return out.strip(), error.strip()
Beispiel #28
0
 def DetectEncodingAndRead(self, fd):
     """Try candidate encodings on *fd* and return the decoded content.

     Remembers the winning codec in self._encoding; returns None when
     every candidate fails.
     """
     candidates = ["utf-8", "utf-16"]
     # Add platform-derived encodings (skipping duplicates), keeping
     # latin-1 last as a catch-all that accepts any byte sequence.
     for extra in (locale.getpreferredencoding(),
                   sys.getdefaultencoding(),
                   locale.getdefaultlocale()[1],
                   sys.getfilesystemencoding(),
                   'latin-1'):
         if extra not in candidates:
             candidates.append(extra)

     for enc in candidates:
         fd.seek(0)
         try:
             content = codecs.getreader(enc)(fd).read()
         except:
             continue
         else:
             self._encoding = enc
             logger.info("Detect file %s 's encoding is %s" % (self.GetFilename(), self._encoding))
             return content

     logger.error("Fail to detect the encoding for file %s" % self.GetFilename())
     return None
Beispiel #29
0
def main(query, exclude):
    print sys.getdefaultencoding()
    createWorkingDirectories(query)
    getGooogleResults(query, exclude)
    file = open("researchResults/" + query + "/data/searchResults.txt")
    line = file.readline()
    while line:
Beispiel #30
0
 def __init__(self, msg, stderr=None, stdout=None):
     # type: (str, bytes, bytes) -> None
     """Build the error message, appending captured stderr/stdout if any."""
     for label, stream in (('stderr', stderr), ('stdout', stdout)):
         if stream:
             # Undecodable bytes are replaced rather than raising.
             msg += '\n[%s]\n%s' % (
                 label, stream.decode(sys.getdefaultencoding(), 'replace'))
     super().__init__(msg)
Beispiel #31
0
from datetime import datetime, timedelta
from errno import ENOENT
from functools import lru_cache
from importlib import import_module
from numbers import Integral, Number
from threading import Lock
from typing import Dict, Iterable, Mapping, Optional, Type, TypeVar
from weakref import WeakValueDictionary

from .core import get_deps

K = TypeVar("K")
V = TypeVar("V")


# Encoding used for decoding external text; plain ASCII is too restrictive
# as a system default, so it is promoted to UTF-8.
system_encoding = ("utf-8" if sys.getdefaultencoding() == "ascii"
                   else sys.getdefaultencoding())


def apply(func, args, kwargs=None):
    """Call *func* with positional *args* and, when non-empty, keyword *kwargs*."""
    if not kwargs:
        return func(*args)
    return func(*args, **kwargs)


def _deprecated(
    *,
    version: str = None,
    message: str = None,
Beispiel #32
0

def truncate_words(s, num, end_text='...'):
    # truncate_words was removed in Django 1.5.
    """Shorten *s* to *num* words; append *end_text* when truncation occurs."""
    suffix = ' %s' % end_text if end_text else ''
    return Truncator(s).words(num, truncate=suffix)


# Wrap truncate_words so it also accepts lazy (translation) strings --
# presumably django.utils.functional.allow_lazy; confirm against imports above.
truncate_words = allow_lazy(truncate_words, six.text_type)

# Django version gates used for compatibility switches in this package.
LTE_DJANGO_1_8 = django.VERSION < (1, 9)
LTE_DJANGO_1_9 = django.VERSION < (1, 10)
GTE_DJANGO_1_10 = django.VERSION >= (1, 10)

if not six.PY3:
    # Python 2 only: encoding used by upath() to turn byte paths into unicode.
    fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()


# copied from django.utils._os (not present in Django 1.4)
def upath(path):
    """
    Always return a unicode path.

    On Python 2, byte-string paths are decoded with the filesystem
    encoding; text paths (and everything on Python 3) pass through
    unchanged.
    """
    if not six.PY2 or isinstance(path, six.text_type):
        return path
    return path.decode(fs_encoding)


# copied from django-cms (for compatibility with Django 1.4)
try:
    from django.utils.encoding import force_unicode  # flake8: noqa
Beispiel #33
0
# -*- coding:utf-8 -*-
import sys, os

# Force the interpreter-wide default encoding to UTF-8.
# NOTE(review): reload(sys) / sys.setdefaultencoding() only exist on
# Python 2; this module would raise NameError/AttributeError on Python 3.
default_encoding = 'utf-8'
if sys.getdefaultencoding() != default_encoding:
    reload(sys)
    sys.setdefaultencoding(default_encoding)

from django.db import models
from django import forms
from django.utils.html import format_html
import uuid


# Create your models here.
class Catinfo(models.Model):
    """Free-text reference record about a cat.

    Field semantics below are inferred from the field names only --
    TODO confirm against the views/templates that render them.
    """
    name = models.CharField(max_length=10, verbose_name="名称")  # admin label is Chinese for "name"
    nameinfo = models.CharField(max_length=1000)    # presumably background on the name/breed
    feature = models.CharField(max_length=1000)     # presumably distinguishing features
    livemethod = models.CharField(max_length=1000)  # presumably living habits
    feednn = models.CharField(max_length=1000)      # presumably feeding needs
    feedmethod = models.CharField(max_length=1000)  # presumably how to feed

    def __str__(self):
        # Shell/admin representation: just the cat's name.
        return self.name


class Person(models.Model):
    sex_choice = (
        (0, '男'),
        (1, '女'),
    )
Beispiel #34
0
        def _earlyExceptionHandler(ty, value, traceback):
            """Report the failure over IPMI and switch to VT1, then defer to
            the default excepthook so the traceback is still printed."""
            util.ipmi_report(constants.IPMI_FAILED)
            util.vtActivate(1)
            return sys.__excepthook__(ty, value, traceback)

        sys.excepthook = _earlyExceptionHandler

    if conf.system.can_audit:
        # auditd will turn into a daemon and exit. Ignore startup errors
        try:
            util.execWithRedirect("/sbin/auditd", [])
        except OSError:
            pass

    log.info("anaconda called with cmdline = %s", sys.argv)
    log.info("Default encoding = %s ", sys.getdefaultencoding())

    # start dbus session (if not already running) and run boss in it
    try:
        anaconda.dbus_launcher.start()
    except TimeoutError as e:
        stdout_log.error(str(e))
        anaconda.dbus_launcher.stop()
        util.ipmi_report(constants.IPMI_ABORTED)
        time.sleep(10)
        sys.exit(1)

    # Collect all addon paths
    addon_paths = collect_addon_paths(constants.ADDON_PATHS)

    # If we were given a kickstart file on the command line, parse (but do not
Beispiel #35
0
import dataclasses
import importlib.abc
import importlib.util
import itertools
import logging
import os
import shutil
import subprocess
import sys
import traceback
import typing as t

logger = logging.getLogger(__name__)

# Shell used for subprocess invocations and the encoding for their output.
SHELL = "/bin/bash"
ENCODING = sys.getdefaultencoding()

# subprocess.CompletedProcess is only subscriptable at runtime on 3.9+;
# on older interpreters the parameterized form is valid solely while
# type checking.
if t.TYPE_CHECKING or sys.version_info[:2] >= (3, 9):
    _T_CompletedProcess = subprocess.CompletedProcess[str]
else:
    _T_CompletedProcess = subprocess.CompletedProcess

# Generic key/value type variables for the helpers below.
_K = t.TypeVar("_K")
_V = t.TypeVar("_V")


def rm_singletons(
        d: t.Dict[_K, t.Union[_V, t.List[_V]]]) -> t.Dict[_K, t.List[_V]]:
    """Convert single values in a dictionary to a list with that value.

    >>> rm_singletons({ "k": "v" })
Beispiel #36
0
    def fix_bin_or_dylib(self, fname):
        """Modifies the interpreter section of 'fname' to fix the dynamic linker,
        or the RPATH section, to fix the dynamic library search path

        This method is only required on NixOS and uses the PatchELF utility to
        change the interpreter/RPATH of ELF executables.

        Please see https://nixos.org/patchelf.html for more information
        """
        default_encoding = sys.getdefaultencoding()
        # If `uname` cannot be run at all we are certainly not on NixOS.
        try:
            ostype = subprocess.check_output(
                ['uname', '-s']).strip().decode(default_encoding)
        except subprocess.CalledProcessError:
            return
        except OSError as reason:
            if getattr(reason, 'winerror', None) is not None:
                # Windows has no uname; nothing to patch.
                return
            raise reason

        if ostype != "Linux":
            return

        # Use `/etc/os-release` instead of `/etc/NIXOS`.
        # The latter one does not exist on NixOS when using tmpfs as root.
        try:
            with open("/etc/os-release", "r") as f:
                if not any(line.strip() == "ID=nixos" for line in f):
                    return
        except FileNotFoundError:
            return
        # presumably: a populated /lib implies a conventional FHS layout where
        # the stock interpreter path already works -- TODO confirm
        if os.path.exists("/lib"):
            return

        # At this point we're pretty sure the user is running NixOS
        nix_os_msg = "info: you seem to be running NixOS. Attempting to patch"
        print(nix_os_msg, fname)

        # Only build `.nix-deps` once.
        nix_deps_dir = self.nix_deps_dir
        if not nix_deps_dir:
            # Run `nix-build` to "build" each dependency (which will likely reuse
            # the existing `/nix/store` copy, or at most download a pre-built copy).
            #
            # Importantly, we create a gc-root called `.nix-deps` in the `build/`
            # directory, but still reference the actual `/nix/store` path in the rpath
            # as it makes it significantly more robust against changes to the location of
            # the `.nix-deps` location.
            #
            # bintools: Needed for the path of `ld-linux.so` (via `nix-support/dynamic-linker`).
            # zlib: Needed as a system dependency of `libLLVM-*.so`.
            # patchelf: Needed for patching ELF binaries (see doc comment above).
            nix_deps_dir = "{}/{}".format(self.build_dir, ".nix-deps")
            nix_expr = '''
            with (import <nixpkgs> {});
            symlinkJoin {
              name = "rust-stage0-dependencies";
              paths = [
                zlib
                patchelf
                stdenv.cc.bintools
              ];
            }
            '''
            try:
                subprocess.check_output([
                    "nix-build",
                    "-E",
                    nix_expr,
                    "-o",
                    nix_deps_dir,
                ])
            except subprocess.CalledProcessError as reason:
                print("warning: failed to call nix-build:", reason)
                return
            # Cache the gc-root path so subsequent calls skip nix-build.
            self.nix_deps_dir = nix_deps_dir

        patchelf = "{}/bin/patchelf".format(nix_deps_dir)
        rpath_entries = [
            # Relative default, all binary and dynamic libraries we ship
            # appear to have this (even when `../lib` is redundant).
            "$ORIGIN/../lib",
            os.path.join(os.path.realpath(nix_deps_dir), "lib")
        ]
        patchelf_args = ["--set-rpath", ":".join(rpath_entries)]
        if not fname.endswith(".so"):
            # Finally, set the correct .interp for binaries
            with open("{}/nix-support/dynamic-linker".format(
                    nix_deps_dir)) as dynamic_linker:
                patchelf_args += [
                    "--set-interpreter",
                    dynamic_linker.read().rstrip()
                ]

        try:
            subprocess.check_output([patchelf] + patchelf_args + [fname])
        except subprocess.CalledProcessError as reason:
            print("warning: failed to call patchelf:", reason)
            return
Beispiel #37
0
    def download_toolchain(self, stage0=True, rustc_channel=None):
        """Fetch the build system for Rust, written in Rust

        This method will build a cache directory, then it will fetch the
        tarball which has the stage0 compiler used to then bootstrap the Rust
        compiler itself.

        Each downloaded tarball is extracted, after that, the script
        will move all the content to the right place.
        """
        if rustc_channel is None:
            rustc_channel = self.rustc_channel
        rustfmt_channel = self.rustfmt_channel
        bin_root = self.bin_root(stage0)

        # Stamp key: downloads are redone when the date (or, for
        # download-rustc, the pinned commit) changes.
        key = self.date
        if not stage0:
            key += str(self.rustc_commit)
        if self.rustc(stage0).startswith(bin_root) and \
                (not os.path.exists(self.rustc(stage0)) or
                 self.program_out_of_date(self.rustc_stamp(stage0), key)):
            if os.path.exists(bin_root):
                shutil.rmtree(bin_root)
            tarball_suffix = '.tar.xz' if support_xz() else '.tar.gz'
            filename = "rust-std-{}-{}{}".format(rustc_channel, self.build,
                                                 tarball_suffix)
            pattern = "rust-std-{}".format(self.build)
            self._download_component_helper(filename, pattern, tarball_suffix,
                                            stage0)
            filename = "rustc-{}-{}{}".format(rustc_channel, self.build,
                                              tarball_suffix)
            self._download_component_helper(filename, "rustc", tarball_suffix,
                                            stage0)
            # download-rustc doesn't need its own cargo, it can just use beta's.
            if stage0:
                filename = "cargo-{}-{}{}".format(rustc_channel, self.build,
                                                  tarball_suffix)
                self._download_component_helper(filename, "cargo",
                                                tarball_suffix)
                self.fix_bin_or_dylib("{}/bin/cargo".format(bin_root))
            else:
                filename = "rustc-dev-{}-{}{}".format(rustc_channel,
                                                      self.build,
                                                      tarball_suffix)
                self._download_component_helper(filename, "rustc-dev",
                                                tarball_suffix, stage0)

            self.fix_bin_or_dylib("{}/bin/rustc".format(bin_root))
            self.fix_bin_or_dylib("{}/bin/rustdoc".format(bin_root))
            lib_dir = "{}/lib".format(bin_root)
            for lib in os.listdir(lib_dir):
                if lib.endswith(".so"):
                    self.fix_bin_or_dylib(os.path.join(lib_dir, lib))
            with output(self.rustc_stamp(stage0)) as rust_stamp:
                rust_stamp.write(key)

        if self.rustfmt() and self.rustfmt().startswith(bin_root) and (
                not os.path.exists(self.rustfmt()) or self.program_out_of_date(
                    self.rustfmt_stamp(), self.rustfmt_channel)):
            if rustfmt_channel:
                tarball_suffix = '.tar.xz' if support_xz() else '.tar.gz'
                [channel, date] = rustfmt_channel.split('-', 1)
                filename = "rustfmt-{}-{}{}".format(channel, self.build,
                                                    tarball_suffix)
                self._download_component_helper(filename,
                                                "rustfmt-preview",
                                                tarball_suffix,
                                                key=date)
                self.fix_bin_or_dylib("{}/bin/rustfmt".format(bin_root))
                self.fix_bin_or_dylib("{}/bin/cargo-fmt".format(bin_root))
                with output(self.rustfmt_stamp()) as rustfmt_stamp:
                    rustfmt_stamp.write(self.rustfmt_channel)

        # Avoid downloading LLVM twice (once for stage0 and once for the master rustc)
        if self.downloading_llvm() and stage0:
            # We want the most recent LLVM submodule update to avoid downloading
            # LLVM more often than necessary.
            #
            # This git command finds that commit SHA, looking for bors-authored
            # merges that modified src/llvm-project or other relevant version
            # stamp files.
            #
            # This works even in a repository that has not yet initialized
            # submodules.
            top_level = subprocess.check_output([
                "git",
                "rev-parse",
                "--show-toplevel",
            ]).decode(sys.getdefaultencoding()).strip()
            llvm_sha = subprocess.check_output([
                "git",
                "rev-list",
                # Bug fixed: this argument was garbled to "[email protected]",
                # which git would not parse as the author filter for the bors
                # merge bot; restore the proper --author flag.
                "--author=bors@rust-lang.org",
                "-n1",
                "--merges",
                "--first-parent",
                "HEAD",
                "--",
                "{}/src/llvm-project".format(top_level),
                "{}/src/bootstrap/download-ci-llvm-stamp".format(top_level),
                # the LLVM shared object file is named `LLVM-12-rust-{version}-nightly`
                "{}/src/version".format(top_level)
            ]).decode(sys.getdefaultencoding()).strip()
            llvm_assertions = self.get_toml('assertions', 'llvm') == 'true'
            llvm_root = self.llvm_root()
            llvm_lib = os.path.join(llvm_root, "lib")
            if self.program_out_of_date(self.llvm_stamp(),
                                        llvm_sha + str(llvm_assertions)):
                self._download_ci_llvm(llvm_sha, llvm_assertions)
                for binary in ["llvm-config", "FileCheck"]:
                    self.fix_bin_or_dylib(
                        os.path.join(llvm_root, "bin", binary))
                for lib in os.listdir(llvm_lib):
                    if lib.endswith(".so"):
                        self.fix_bin_or_dylib(os.path.join(llvm_lib, lib))
                with output(self.llvm_stamp()) as llvm_stamp:
                    llvm_stamp.write(llvm_sha + str(llvm_assertions))
Beispiel #38
0
def default_build_triple(verbose):
    """Build triple as in LLVM

    Returns a "<cpu>-<vendor>-<os>" string (e.g. "x86_64-unknown-linux-gnu").
    Prefers the triple reported by an installed `rustc`; otherwise maps
    `uname` output onto LLVM's naming scheme.
    """
    # If the user already has a host build triple with an existing `rustc`
    # install, use their preference. This fixes most issues with Windows builds
    # being detected as GNU instead of MSVC.
    default_encoding = sys.getdefaultencoding()
    try:
        version = subprocess.check_output(["rustc", "--version", "--verbose"],
                                          stderr=subprocess.DEVNULL)
        version = version.decode(default_encoding)
        host = next(x for x in version.split('\n') if x.startswith("host: "))
        triple = host.split("host: ")[1]
        if verbose:
            print("detected default triple {}".format(triple))
        return triple
    except Exception as e:
        # rustc missing/unparseable output: fall through to uname detection.
        if verbose:
            print("rustup not detected: {}".format(e))
            print("falling back to auto-detect")

    # On non-Windows platforms a missing uname is a hard error.
    required = sys.platform != 'win32'
    ostype = require(["uname", "-s"], exit=required)
    cputype = require(['uname', '-m'], exit=required)

    # If we do not have `uname`, assume Windows.
    if ostype is None or cputype is None:
        return 'x86_64-pc-windows-msvc'

    ostype = ostype.decode(default_encoding)
    cputype = cputype.decode(default_encoding)

    # The goal here is to come up with the same triple as LLVM would,
    # at least for the subset of platforms we're willing to target.
    ostype_mapper = {
        'Darwin': 'apple-darwin',
        'DragonFly': 'unknown-dragonfly',
        'FreeBSD': 'unknown-freebsd',
        'Haiku': 'unknown-haiku',
        'NetBSD': 'unknown-netbsd',
        'OpenBSD': 'unknown-openbsd'
    }

    # Consider the direct transformation first and then the special cases
    if ostype in ostype_mapper:
        ostype = ostype_mapper[ostype]
    elif ostype == 'Linux':
        os_from_sp = subprocess.check_output(
            ['uname', '-o']).strip().decode(default_encoding)
        if os_from_sp == 'Android':
            ostype = 'linux-android'
        else:
            ostype = 'unknown-linux-gnu'
    elif ostype == 'SunOS':
        ostype = 'pc-solaris'
        # On Solaris, uname -m will return a machine classification instead
        # of a cpu type, so uname -p is recommended instead.  However, the
        # output from that option is too generic for our purposes (it will
        # always emit 'i386' on x86/amd64 systems).  As such, isainfo -k
        # must be used instead.
        cputype = require(['isainfo', '-k']).decode(default_encoding)
        # sparc cpus have sun as a target vendor
        if 'sparc' in cputype:
            ostype = 'sun-solaris'
    elif ostype.startswith('MINGW'):
        # msys' `uname` does not print gcc configuration, but prints msys
        # configuration. so we cannot believe `uname -m`:
        # msys1 is always i686 and msys2 is always x86_64.
        # instead, msys defines $MSYSTEM which is MINGW32 on i686 and
        # MINGW64 on x86_64.
        ostype = 'pc-windows-gnu'
        cputype = 'i686'
        if os.environ.get('MSYSTEM') == 'MINGW64':
            cputype = 'x86_64'
    elif ostype.startswith('MSYS'):
        ostype = 'pc-windows-gnu'
    elif ostype.startswith('CYGWIN_NT'):
        cputype = 'i686'
        if ostype.endswith('WOW64'):
            cputype = 'x86_64'
        ostype = 'pc-windows-gnu'
    elif sys.platform == 'win32':
        # Some Windows platforms might have a `uname` command that returns a
        # non-standard string (e.g. gnuwin32 tools returns `windows32`). In
        # these cases, fall back to using sys.platform.
        return 'x86_64-pc-windows-msvc'
    else:
        err = "unknown OS type: {}".format(ostype)
        sys.exit(err)

    if cputype == 'powerpc' and ostype == 'unknown-freebsd':
        cputype = subprocess.check_output(['uname', '-p'
                                           ]).strip().decode(default_encoding)
    cputype_mapper = {
        'BePC': 'i686',
        'aarch64': 'aarch64',
        'amd64': 'x86_64',
        'arm64': 'aarch64',
        'i386': 'i686',
        'i486': 'i686',
        'i686': 'i686',
        'i786': 'i686',
        'powerpc': 'powerpc',
        'powerpc64': 'powerpc64',
        'powerpc64le': 'powerpc64le',
        'ppc': 'powerpc',
        'ppc64': 'powerpc64',
        'ppc64le': 'powerpc64le',
        's390x': 's390x',
        'x64': 'x86_64',
        'x86': 'i686',
        'x86-64': 'x86_64',
        'x86_64': 'x86_64'
    }

    # Consider the direct transformation first and then the special cases
    if cputype in cputype_mapper:
        cputype = cputype_mapper[cputype]
    elif cputype in {'xscale', 'arm'}:
        cputype = 'arm'
        if ostype == 'linux-android':
            ostype = 'linux-androideabi'
        elif ostype == 'unknown-freebsd':
            cputype = subprocess.check_output(
                ['uname', '-p']).strip().decode(default_encoding)
            ostype = 'unknown-freebsd'
    elif cputype == 'armv6l':
        cputype = 'arm'
        if ostype == 'linux-android':
            ostype = 'linux-androideabi'
        else:
            ostype += 'eabihf'
    elif cputype in {'armv7l', 'armv8l'}:
        cputype = 'armv7'
        if ostype == 'linux-android':
            ostype = 'linux-androideabi'
        else:
            ostype += 'eabihf'
    elif cputype == 'mips':
        if sys.byteorder == 'big':
            cputype = 'mips'
        elif sys.byteorder == 'little':
            cputype = 'mipsel'
        else:
            raise ValueError("unknown byteorder: {}".format(sys.byteorder))
    elif cputype == 'mips64':
        if sys.byteorder == 'big':
            cputype = 'mips64'
        elif sys.byteorder == 'little':
            cputype = 'mips64el'
        else:
            raise ValueError('unknown byteorder: {}'.format(sys.byteorder))
        # only the n64 ABI is supported, indicate it
        ostype += 'abi64'
    elif cputype == 'sparc' or cputype == 'sparcv9' or cputype == 'sparc64':
        pass
    else:
        err = "unknown cpu type: {}".format(cputype)
        sys.exit(err)

    return "{}-{}".format(cputype, ostype)
Beispiel #39
0
def prints(tstr):
    """ lovely unicode

    Encode *tstr* with the interpreter default encoding (replacing what
    cannot be encoded), write it to stdout with a newline, and flush.
    """
    encoded = tstr.encode(sys.getdefaultencoding(), 'replace')
    sys.stdout.write('%s\n' % (encoded,))
    sys.stdout.flush()
# coding:utf-8
# create by zhaotianxiang
import pymysql
import sys

# Show the interpreter default encoding at startup (debugging aid).
print(sys.getdefaultencoding())
from ConnectToMySQL import connectMySQL

# Module-level connection and cursor shared by the helpers below.
db = connectMySQL()
# (translated) The character encoding used below is critically important --
# it cost two hours of debugging; always configure a consistent encoding
# whenever touching the data.
cursor = db.cursor()


def createTable():
    """(Re)create the DailyBoxOffices table, dropping any existing copy.

    Uses the module-level `cursor`. Prints an error message (and continues)
    if table creation fails.
    """
    cursor.execute("DROP TABLE IF EXISTS DailyBoxOffices")
    # Bug fixed: the original statement read "CREATE TABLE (" with no table
    # name, which is invalid SQL and always fell into the except branch.
    sql = """CREATE TABLE DailyBoxOffices (
    itemID INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
    itemDate DATE NOT NULL,
    movieName VARCHAR(45) NOT NULL,
    boxOffice CHAR(10),
    proportion CHAR(10),
    attendence CHAR(10),
    releaseDays CHAR(10))"""
    try:
        cursor.execute(sql)
    except Exception:
        # Narrowed from a bare except so Ctrl-C/SystemExit still propagate.
        print("error to create table!\n")


def storeToDatabase():
    file_read = open('../data/movie_erverday_information.txt',
Beispiel #41
0
    def onOpen(self, loadFirst='', loadEncode=''):
        """
        2.1: total rewrite for Unicode support; open in text mode with
        an encoding passed in, input from the user, in textconfig, or
        platform default, or open as binary bytes for arbitrary Unicode
        encodings as last resort and drop \r in Windows end-lines if
        present so text displays normally; content fetches are returned
        as str, so need to  encode on saves: keep encoding used here;

        tests if file is okay ahead of time to try to avoid opens;
        we could also load and manually decode bytes to str to avoid
        multiple open attempts, but this is unlikely to try all cases;

        encoding behavior is configurable in the local textConfig.py:
        1) tries known type first if passed in by client (email charsets)
        2) if opensAskUser True, try user input next (prefill wih defaults)
        3) if opensEncoding nonempty, try this encoding next: 'latin-1', etc.
        4) tries sys.getdefaultencoding() platform default next
        5) uses binary mode bytes and Tk policy as the last resort
        """

        if self.text_edit_modified():    # 2.0
            if not askyesno('PyEdit', 'Text has changed: discard changes?'):
                return

        file = loadFirst or self.my_askopenfilename()
        if not file:
            return

        if not os.path.isfile(file):
            showerror('PyEdit', 'Could not open file ' + file)
            return

        # try known encoding if passed and accurate (e.g., email)
        text = None     # empty file = '' = False: test for None!
        if loadEncode:
            try:
                text = open(file, 'r', encoding=loadEncode).read()
                self.knownEncoding = loadEncode
            except (UnicodeError, LookupError, IOError):         # lookup: bad name
                pass

        # try user input, prefill with next choice as default
        # (fixed: all `text == None` checks below use `is None`, the
        # identity test PEP 8 requires for None comparisons)
        if text is None and self.opensAskUser:
            self.update()  # else dialog doesn't appear in rare cases
            askuser = askstring('PyEdit', 'Enter Unicode encoding for open',
                                initialvalue=(self.opensEncoding or
                                              sys.getdefaultencoding() or ''))
            if askuser:
                try:
                    text = open(file, 'r', encoding=askuser).read()
                    self.knownEncoding = askuser
                except (UnicodeError, LookupError, IOError):
                    pass

        # try config file (or before ask user?)
        if text is None and self.opensEncoding:
            try:
                text = open(file, 'r', encoding=self.opensEncoding).read()
                self.knownEncoding = self.opensEncoding
            except (UnicodeError, LookupError, IOError):
                pass

        # try platform default (utf-8 on windows; try utf8 always?)
        if text is None:
            try:
                text = open(file, 'r', encoding=sys.getdefaultencoding()).read()
                self.knownEncoding = sys.getdefaultencoding()
            except (UnicodeError, LookupError, IOError):
                pass

        # last resort: use binary bytes and rely on Tk to decode
        if text is None:
            try:
                text = open(file, 'rb').read()         # bytes for Unicode
                text = text.replace(b'\r\n', b'\n')    # for display, saves
                self.knownEncoding = None
            except IOError:
                pass

        if text is None:
            showerror('PyEdit', 'Could not decode and open file ' + file)
        else:
            self.setAllText(text)
            self.setFileName(file)
            self.text.edit_reset()             # 2.0: clear undo/redo stks
            self.text.edit_modified(0)         # 2.0: clear modified flag
Beispiel #42
0
    def onSaveAs(self, forcefile=None):
        """
        2.1: total rewrite for Unicode support: Text content is always
        returned as a str, so we must deal with encodings to save to
        a file here, regardless of open mode of the output file (binary
        requires bytes, and text must encode); tries the encoding used
        when opened or saved (if known), user input, config file setting,
        and platform default last; most users can use platform default;

        retains successful encoding name here for next save, because this
        may be the first Save after New or a manual text insertion;  Save
        and SaveAs may both use last known encoding, per config file (it
        probably should be used for Save, but SaveAs usage is unclear);
        gui prompts are prefilled with the known encoding if there is one;

        does manual text.encode() to avoid creating file; text mode files
        perform platform specific end-line conversion: Windows \r dropped
        if present on open by text mode (auto) and binary mode (manually);
        if manual content inserts, must delete \r else duplicates here;
        knownEncoding=None before first Open or Save, after New, if binary Open;

        encoding behavior is configurable in the local textConfig.py:
        1) if savesUseKnownEncoding > 0, try encoding from last open or save
        2) if savesAskUser True, try user input next (prefill with known?)
        3) if savesEncoding nonempty, try this encoding next: 'utf-8', etc
        4) tries sys.getdefaultencoding() as a last resort
        """

        filename = forcefile or self.my_asksaveasfilename()
        if not filename:
            return

        text = self.getAllText()      # 2.1: a str string, with \n eolns,
        encpick = None                # even if read/inserted as bytes

        # try known encoding at latest Open or Save, if any
        if self.knownEncoding and (                                  # enc known?
           (forcefile     and self.savesUseKnownEncoding >= 1) or    # on Save?
           (not forcefile and self.savesUseKnownEncoding >= 2)):     # on SaveAs?
            try:
                text.encode(self.knownEncoding)
                encpick = self.knownEncoding
            except UnicodeError:
                pass

        # try user input, prefill with known type, else next choice
        if not encpick and self.savesAskUser:
            self.update()  # else dialog doesn't appear in rare cases
            askuser = askstring('PyEdit', 'Enter Unicode encoding for save',
                                initialvalue=(self.knownEncoding or
                                              self.savesEncoding or
                                              sys.getdefaultencoding() or ''))
            if askuser:
                try:
                    text.encode(askuser)
                    encpick = askuser
                except (UnicodeError, LookupError):    # LookupError:  bad name
                    pass                               # UnicodeError: can't encode

        # try config file
        if not encpick and self.savesEncoding:
            try:
                text.encode(self.savesEncoding)
                encpick = self.savesEncoding
            except (UnicodeError, LookupError):
                pass

        # try platform default (utf8 on windows)
        if not encpick:
            try:
                text.encode(sys.getdefaultencoding())
                encpick = sys.getdefaultencoding()
            except (UnicodeError, LookupError):
                pass

        # open in text mode for endlines + encoding
        if not encpick:
            showerror('PyEdit', 'Could not encode for file ' + filename)
        else:
            try:
                # Fixed: a with-statement guarantees the handle is closed even
                # if write() raises (the original leaked it on write errors).
                with open(filename, 'w', encoding=encpick) as outfile:
                    outfile.write(text)
            except Exception:
                # Narrowed from a bare except: KeyboardInterrupt/SystemExit
                # now propagate instead of being reported as a write failure.
                showerror('PyEdit', 'Could not write file ' + filename)
            else:
                self.setFileName(filename)          # may be newly created
                self.text.edit_modified(0)          # 2.0: clear modified flag
                self.knownEncoding = encpick        # 2.1: keep enc for next save
Beispiel #43
0
 def test_getdefaultencoding(self):
     """sys.getdefaultencoding() rejects arguments and returns a str."""
     # Python 2 era guard: only meaningful on builds with unicode support.
     if test.test_support.have_unicode:
         # The function takes no arguments, so passing one must raise.
         self.assertRaises(TypeError, sys.getdefaultencoding, 42)
         # can't check more than the type, as the user might have changed it
         self.assertIsInstance(sys.getdefaultencoding(), str)
Beispiel #44
0
# Partition a list of numbers into evens and odds by popping from the end
# (Python 2 script: uses print statements and the `unicode` builtin below).
numbers = [1, 2, 3, 4, 11, 22, 33, 88, 99, 1000]
print numbers
even = []
odd = []
while len(numbers) > 0:
    num = numbers.pop()
    if (num % 2 == 0):
        even.append(num)
    else:
        odd.append(num)

print even
print odd
print even + odd
print set(even + odd)  # same elements, shown in set (unordered) form

# Simple counted loop demo.
count = 0
while (count <= 9):
    print "the count is:", count
    count += 1
print "gooods bye"

# NOTE(review): the loop variable shadows the builtin `str` for the rest
# of the module -- rename if this script grows.
for str in "yanzhiwei":
    print str
    pass
print "end for;"

import sys
print sys.getdefaultencoding()
print isinstance("中国", unicode)  # Python 2: byte literal, so prints False
def _uni(s, e):
    # hack for py3
    if sys.version_info[0] > 2:
        return str(bytes(s, sys.getdefaultencoding()), e)
    else:
        return unicode(s, e)
Beispiel #46
0
    "NIEP": "http://eida-sc3.infp.ro",
    "NOA": "http://eida.gein.noa.gr",
    "ODC": "http://www.orfeus-eu.org",
    "ORFEUS": "http://www.orfeus-eu.org",
    "RESIF": "http://ws.resif.fr",
    "SCEDC": "http://service.scedc.caltech.edu",
    "USGS": "http://earthquake.usgs.gov",
    "USP": "http://sismo.iag.usp.br",
}

# The three standard FDSN web service types.
FDSNWS = ("dataselect", "event", "station")

# Normalize the platform string to pure ASCII for the User-Agent header:
# the encode/decode round-trip with "ignore" drops non-ASCII characters.
if PY2:
    platform_ = platform.platform().decode("ascii", "ignore")
else:
    encoding = sys.getdefaultencoding() or "UTF-8"
    platform_ = platform.platform().encode(encoding).decode("ascii", "ignore")
# The default User Agent that will be sent with every request.
DEFAULT_USER_AGENT = "ObsPy/%s (%s, Python %s)" % (__version__, platform_,
                                                   platform.python_version())

# The default parameters. Different services can choose to add more. It always
# contains the long name first and the short name second. If it has no short
# name, it is simply a tuple with only one entry.
DEFAULT_DATASELECT_PARAMETERS = [
    "starttime", "endtime", "network", "station", "location", "channel"
]

OPTIONAL_DATASELECT_PARAMETERS = ["quality", "minimumlength", "longestonly"]

DEFAULT_STATION_PARAMETERS = [
Beispiel #47
0
# coding=utf-8
import socket
import sys
import logging

# NOTE(review): this call's return value is discarded -- it has no effect
# and looks like a leftover from a Python 2 default-encoding hack.
sys.getdefaultencoding()
logging.basicConfig(format='%(levelname)s: %(message)s')


def G(s):
    """Wrap *s* in ANSI green (SGR 32;2) escape codes for terminal output."""
    esc = chr(27)
    return "%s[32;2m%s%s[0m" % (esc, s, esc)


def A(s):
    """Wrap *s* in ANSI cyan (SGR 36;2) escape codes for terminal output."""
    esc = chr(27)
    return "%s[36;2m%s%s[0m" % (esc, s, esc)


def R(s):
    """Wrap *s* in ANSI red (SGR 31;2) escape codes for terminal output."""
    esc = chr(27)
    return "%s[31;2m%s%s[0m" % (esc, s, esc)


def tcp_test(url, port):
    """Check whether a TCP connection to ``url:port`` can be opened.

    :param url: host name or IP address to connect to
    :param port: port number (int or numeric string)
    :returns: an error message string when the connection fails,
        ``None`` on success (mirroring the original behaviour)

    BUG FIX: the original overwrote its ``url``/``port`` parameters with
    ``sys.argv[1]``/``sys.argv[2]``, making the arguments useless and the
    function unusable as a library call.  It also leaked the socket on a
    failed connection; the socket is now always closed.
    """
    sk = socket.socket()
    sk.settimeout(1)
    try:
        sk.connect((url, int(port)))
    except Exception:
        return 'Server(%s) port %s ' % (url, port) + R('not connect!')
    finally:
        sk.close()
Beispiel #48
0
def get_gitlab_status():
    """Return the Docker health status string of the ``gitlab`` container."""
    command = "docker inspect -f {{.State.Health.Status}} gitlab".split()
    completed = subprocess.run(
        command,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    status = completed.stdout.decode(sys.getdefaultencoding())
    return status.strip()
try:
    import imdb
except ImportError:
    print('You bad boy!  You need to install the IMDbPY package!')
    sys.exit(1)

# Exactly one command-line argument (the company name) is required.
if len(sys.argv) != 2:
    print('Only one argument is required:')
    print('  %s "company name"' % sys.argv[0])
    sys.exit(2)

name = sys.argv[1]

i = imdb.IMDb()

# Encoding used for console output; fall back to the interpreter default.
out_encoding = sys.stdout.encoding or sys.getdefaultencoding()

try:
    # Do the search, and get the results (a list of company objects).
    results = i.search_company(name)
except imdb.IMDbError as e:
    print("Probably you're not connected to Internet.  Complete error report:")
    print(e)
    sys.exit(3)

# Print the results.  ('', 's')[...] appends an "s" unless there is
# exactly one result.
print('    %s result%s for "%s":' % (len(results),
                                     ('', 's')[len(results) != 1], name))
print('companyID\t: imdbID : name')

# Print the long imdb name for every company.
Beispiel #50
0
#!/usr/bin/python
# -*- coding: utf-8 -*-
import logging
import os
from xml.dom.minidom import parse
from logging.handlers import TimedRotatingFileHandler
import sys
# Python 2 only: force the process-wide default encoding to UTF-8.
if sys.getdefaultencoding() != 'utf-8':
    reload(sys)
    sys.setdefaultencoding('utf-8')

# Root logger writes DEBUG and above to a daily-rotated file under logs/,
# keeping 90 days of backups.
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
if not os.path.exists("logs"):
    os.mkdir("logs")
handler = TimedRotatingFileHandler(filename="logs/upload.log",
                                   when="d",
                                   interval=1,
                                   backupCount=90)
handler.setFormatter(
    logging.Formatter(
        '%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s - %(message)s'
    ))
logger.addHandler(handler)

ftp_param = {}
upload_params = []
dom = parse("./config.xml")
# Get the document element object of the parsed config file.
document = dom.documentElement
ftp_server = document.getElementsByTagName("ftp_server")[0]
Beispiel #51
0
 def encode(self, encoding=None):
     """More ``unicode`` class emulation.

     :param encoding: codec name; defaults to the interpreter's default
         encoding, resolved at call time.

     BUG FIX: the original used ``encoding=sys.getdefaultencoding()`` as
     the default, which is evaluated once at function-definition time
     (the call-in-default-argument anti-pattern); it is now looked up on
     each call.
     """
     if encoding is None:
         encoding = sys.getdefaultencoding()
     return str(self).encode(encoding)
Beispiel #52
0
 def decodeCommandLine(self, cmdline):
     """Turn a byte string from the command line into a unicode string.

     The codec is taken from ``sys.stdin.encoding`` when available,
     otherwise the interpreter's default encoding.

     NOTE(review): Python 2 only — relies on the ``unicode`` builtin.
     """
     codec = getattr(sys.stdin, 'encoding',
                     None) or sys.getdefaultencoding()
     return unicode(cmdline, codec)
Beispiel #53
0
def handle_encoding(bot, ievent):
    """Reply with the bot's encoding, falling back to the interpreter default.

    BUG FIX: the original wrote
    ``'default encoding is %s' % bot.encoding or sys.getdefaultencoding()``.
    ``%`` binds tighter than ``or``, so ``or`` was applied to the
    already-formatted string, which is always truthy — the fallback could
    never trigger and a None/empty encoding was reported literally.
    """
    encoding = bot.encoding or sys.getdefaultencoding()
    ievent.reply('default encoding is %s' % encoding)
Beispiel #54
0
def compile_cython_modules(profile=False,
                           compile_more=False,
                           cython_with_refnanny=False):
    """Build Cython's own modules as compiled extensions.

    :param profile: enable the Cython ``profile`` directive for all
        compiled modules
    :param compile_more: additionally compile a larger set of compiler
        modules
    :param cython_with_refnanny: define ``CYTHON_REFNANNY`` for every
        module except the refnanny module itself
    """
    source_root = os.path.abspath(os.path.dirname(__file__))
    # The minimal set of modules that is always compiled.
    compiled_modules = [
        "Cython.Plex.Scanners",
        "Cython.Plex.Actions",
        "Cython.Compiler.Lexicon",
        "Cython.Compiler.Scanning",
        "Cython.Compiler.Parsing",
        "Cython.Compiler.Visitor",
        "Cython.Compiler.FlowControl",
        "Cython.Compiler.Code",
        "Cython.Runtime.refnanny",
        # "Cython.Compiler.FusedNode",
        "Cython.Tempita._tempita",
    ]
    if compile_more:
        compiled_modules.extend([
            "Cython.Build.Dependencies",
            "Cython.Compiler.ParseTreeTransforms",
            "Cython.Compiler.Nodes",
            "Cython.Compiler.ExprNodes",
            "Cython.Compiler.ModuleNode",
            "Cython.Compiler.Optimize",
        ])

    from distutils.spawn import find_executable
    from distutils.sysconfig import get_python_inc
    # Look for CPython's pgen tool to (re)generate the formal grammar.
    pgen = find_executable(
        'pgen',
        os.pathsep.join([
            os.environ['PATH'],
            os.path.join(get_python_inc(), '..', 'Parser')
        ]))
    if not pgen:
        print("Unable to find pgen, not compiling formal grammar.")
    else:
        parser_dir = os.path.join(os.path.dirname(__file__), 'Cython',
                                  'Parser')
        grammar = os.path.join(parser_dir, 'Grammar')
        subprocess.check_call([
            pgen,
            os.path.join(grammar),
            os.path.join(parser_dir, 'graminit.h'),
            os.path.join(parser_dir, 'graminit.c'),
        ])
        cst_pyx = os.path.join(parser_dir, 'ConcreteSyntaxTree.pyx')
        # Keep the .pyx at least as new as the grammar so it is rebuilt.
        if os.stat(grammar)[stat.ST_MTIME] > os.stat(cst_pyx)[stat.ST_MTIME]:
            mtime = os.stat(grammar)[stat.ST_MTIME]
            os.utime(cst_pyx, (mtime, mtime))
        compiled_modules.extend([
            "Cython.Parser.ConcreteSyntaxTree",
        ])

    defines = []
    if cython_with_refnanny:
        defines.append(('CYTHON_REFNANNY', '1'))

    extensions = []
    if sys.version_info[0] >= 3:
        from Cython.Distutils import build_ext as build_ext_orig
        for module in compiled_modules:
            source_file = os.path.join(source_root, *module.split('.'))
            if os.path.exists(source_file + ".py"):
                pyx_source_file = source_file + ".py"
            else:
                pyx_source_file = source_file + ".pyx"
            dep_files = []
            if os.path.exists(source_file + '.pxd'):
                dep_files.append(source_file + '.pxd')
            # refnanny itself must not be built with CYTHON_REFNANNY.
            if '.refnanny' in module:
                defines_for_module = []
            else:
                defines_for_module = defines
            extensions.append(
                Extension(module,
                          sources=[pyx_source_file],
                          define_macros=defines_for_module,
                          depends=dep_files))

        class build_ext(build_ext_orig):
            # we must keep the original modules alive to make sure
            # their code keeps working when we remove them from
            # sys.modules
            dead_modules = []

            def build_extensions(self):
                # add path where 2to3 installed the transformed sources
                # and make sure Python (re-)imports them from there
                already_imported = [
                    module for module in sys.modules
                    if module == 'Cython' or module.startswith('Cython.')
                ]
                keep_alive = self.dead_modules.append
                for module in already_imported:
                    keep_alive(sys.modules[module])
                    del sys.modules[module]
                sys.path.insert(0, os.path.join(source_root, self.build_lib))

                if profile:
                    from Cython.Compiler.Options import directive_defaults
                    directive_defaults['profile'] = True
                    print("Enabled profiling for the Cython binary modules")
                build_ext_orig.build_extensions(self)

        setup_args['ext_modules'] = extensions
        add_command_class("build_ext", build_ext)

    else:  # Python 2.x
        from distutils.command.build_ext import build_ext as build_ext_orig
        try:

            class build_ext(build_ext_orig):
                def build_extension(self, ext, *args, **kargs):
                    try:
                        build_ext_orig.build_extension(self, ext, *args,
                                                       **kargs)
                    # NOTE(review): StandardError exists on Python 2 only.
                    except StandardError:
                        print("Compilation of '%s' failed" % ext.sources[0])

            from Cython.Compiler.Main import compile
            from Cython import Utils
            if profile:
                from Cython.Compiler.Options import directive_defaults
                directive_defaults['profile'] = True
                print("Enabled profiling for the Cython binary modules")
            source_root = os.path.dirname(__file__)
            for module in compiled_modules:
                source_file = os.path.join(source_root, *module.split('.'))
                if os.path.exists(source_file + ".py"):
                    pyx_source_file = source_file + ".py"
                else:
                    pyx_source_file = source_file + ".pyx"
                c_source_file = source_file + ".c"
                # Recythonize only when the .pyx/.py (or its .pxd) is
                # newer than the generated C file.
                source_is_newer = False
                if not os.path.exists(c_source_file):
                    source_is_newer = True
                else:
                    c_last_modified = Utils.modification_time(c_source_file)
                    if Utils.file_newer_than(pyx_source_file, c_last_modified):
                        source_is_newer = True
                    else:
                        pxd_source_file = source_file + ".pxd"
                        if os.path.exists(
                                pxd_source_file) and Utils.file_newer_than(
                                    pxd_source_file, c_last_modified):
                            source_is_newer = True
                if source_is_newer:
                    print("Compiling module %s ..." % module)
                    result = compile(pyx_source_file)
                    c_source_file = result.c_file
                if c_source_file:
                    # Py2 distutils can't handle unicode file paths
                    if isinstance(c_source_file, unicode):
                        filename_encoding = sys.getfilesystemencoding()
                        if filename_encoding is None:
                            filename_encoding = sys.getdefaultencoding()
                        c_source_file = c_source_file.encode(filename_encoding)
                    if '.refnanny' in module:
                        defines_for_module = []
                    else:
                        defines_for_module = defines
                    extensions.append(
                        Extension(module,
                                  sources=[c_source_file],
                                  define_macros=defines_for_module))
                else:
                    print("Compilation failed")
            if extensions:
                setup_args['ext_modules'] = extensions
                add_command_class("build_ext", build_ext)
        except Exception:
            print('''
ERROR: %s

Extension module compilation failed, looks like Cython cannot run
properly on this system.  To work around this, pass the option
"--no-cython-compile".  This will install a pure Python version of
Cython without compiling its own sources.
''' % sys.exc_info()[1])
            raise
Beispiel #55
0
def compile(**kwargs):
    """There are three modes of parameters :func:`compile()` can take:
    ``string``, ``filename``, and ``dirname``.

    The ``string`` parameter is the most basic way to compile SASS.
    It simply takes a string of SASS code, and then returns a compiled
    CSS string.

    :param string: SASS source code to compile.  it's exclusive to
                   ``filename`` and ``dirname`` parameters
    :type string: :class:`str`
    :param output_style: an optional coding style of the compiled result.
                         choose one of: ``'nested'`` (default), ``'expanded'``,
                         ``'compact'``, ``'compressed'``
    :type output_style: :class:`str`
    :param source_comments: whether to add comments about source lines.
                            :const:`False` by default
    :type source_comments: :class:`bool`
    :param include_paths: an optional list of paths to find ``@import``\ ed
                          SASS/CSS source files
    :type include_paths: :class:`collections.Sequence`
    :param precision: optional precision for numbers. :const:`5` by default.
    :type precision: :class:`int`
    :param custom_functions: optional mapping of custom functions.
                             see also below `custom functions
                             <custom-functions_>`_ description
    :type custom_functions: :class:`collections.Set`,
                            :class:`collections.Sequence`,
                            :class:`collections.Mapping`
    :param indented: optional declaration that the string is SASS, not SCSS
                     formatted. :const:`False` by default
    :type indented: :class:`bool`
    :returns: the compiled CSS string
    :param importers: optional callback functions.
                     see also below `importer callbacks
                     <importer-callbacks_>`_ description
    :type importers: :class:`collections.Callable`
    :rtype: :class:`str`
    :raises sass.CompileError: when it fails for any reason
                               (for example the given SASS has broken syntax)

    The ``filename`` is the most commonly used way.  It takes a string of
    SASS filename, and then returns a compiled CSS string.

    :param filename: the filename of SASS source code to compile.
                     it's exclusive to ``string`` and ``dirname`` parameters
    :type filename: :class:`str`
    :param output_style: an optional coding style of the compiled result.
                         choose one of: ``'nested'`` (default), ``'expanded'``,
                         ``'compact'``, ``'compressed'``
    :type output_style: :class:`str`
    :param source_comments: whether to add comments about source lines.
                            :const:`False` by default
    :type source_comments: :class:`bool`
    :param source_map_filename: use source maps and indicate the source map
                                output filename.  :const:`None` means not
                                using source maps.  :const:`None` by default.
    :type source_map_filename: :class:`str`
    :param include_paths: an optional list of paths to find ``@import``\ ed
                          SASS/CSS source files
    :type include_paths: :class:`collections.Sequence`
    :param precision: optional precision for numbers. :const:`5` by default.
    :type precision: :class:`int`
    :param custom_functions: optional mapping of custom functions.
                             see also below `custom functions
                             <custom-functions_>`_ description
    :type custom_functions: :class:`collections.Set`,
                            :class:`collections.Sequence`,
                            :class:`collections.Mapping`
    :param importers: optional callback functions.
                     see also below `importer callbacks
                     <importer-callbacks_>`_ description
    :type importers: :class:`collections.Callable`
    :returns: the compiled CSS string, or a pair of the compiled CSS string
              and the source map string if ``source_map_filename`` is set
    :rtype: :class:`str`, :class:`tuple`
    :raises sass.CompileError: when it fails for any reason
                               (for example the given SASS has broken syntax)
    :raises exceptions.IOError: when the ``filename`` doesn't exist or
                                cannot be read

    The ``dirname`` is useful for automation.  It takes a pair of paths.
    The first of the ``dirname`` pair refers the source directory, contains
    several SASS source files to compiled.  SASS source files can be nested
    in directories.  The second of the pair refers the output directory
    that compiled CSS files would be saved.  Directory tree structure of
    the source directory will be maintained in the output directory as well.
    If ``dirname`` parameter is used the function returns :const:`None`.

    :param dirname: a pair of ``(source_dir, output_dir)``.
                    it's exclusive to ``string`` and ``filename``
                    parameters
    :type dirname: :class:`tuple`
    :param output_style: an optional coding style of the compiled result.
                         choose one of: ``'nested'`` (default), ``'expanded'``,
                         ``'compact'``, ``'compressed'``
    :type output_style: :class:`str`
    :param source_comments: whether to add comments about source lines.
                            :const:`False` by default
    :type source_comments: :class:`bool`
    :param include_paths: an optional list of paths to find ``@import``\ ed
                          SASS/CSS source files
    :type include_paths: :class:`collections.Sequence`
    :param precision: optional precision for numbers. :const:`5` by default.
    :type precision: :class:`int`
    :param custom_functions: optional mapping of custom functions.
                             see also below `custom functions
                             <custom-functions_>`_ description
    :type custom_functions: :class:`collections.Set`,
                            :class:`collections.Sequence`,
                            :class:`collections.Mapping`
    :raises sass.CompileError: when it fails for any reason
                               (for example the given SASS has broken syntax)

    .. _custom-functions:

    The ``custom_functions`` parameter can take three types of forms:

    :class:`~collections.Set`/:class:`~collections.Sequence` of \
    :class:`SassFunction`\ s
       It is the most general form.  Although pretty verbose, it can take
       any kind of callables like type objects, unnamed functions,
       and user-defined callables.

       .. code-block:: python

          sass.compile(
              ...,
              custom_functions={
                  sass.SassFunction('func-name', ('$a', '$b'), some_callable),
                  ...
              }
          )

    :class:`~collections.Mapping` of names to functions
       Less general, but easier-to-use form.  Although it's not it can take
       any kind of callables, it can take any kind of *functions* defined
       using :keyword:`def`/:keyword:`lambda` syntax.
       It cannot take callables other than them since inspecting arguments
       is not always available for every kind of callables.

       .. code-block:: python

          sass.compile(
              ...,
              custom_functions={
                  'func-name': lambda a, b: ...,
                  ...
              }
          )

    :class:`~collections.Set`/:class:`~collections.Sequence` of \
    named functions
       Not general, but the easiest-to-use form for *named* functions.
       It can take only named functions, defined using :keyword:`def`.
       It cannot take lambdas since names are unavailable for them.

       .. code-block:: python

          def func_name(a, b):
              return ...

          sass.compile(
              ...,
              custom_functions={func_name}
          )

    .. _importer-callbacks:

    Newer versions of ``libsass`` allow developers to define callbacks to be
    called and given a chance to process ``@import`` directives. You can
    define yours by passing in a list of callables via the ``importers``
    parameter. The callables must be passed as 2-tuples in the form:

    .. code-block:: python

        (priority_int, callback_fn)

    A priority of zero is acceptable; priority determines the order callbacks
    are attempted.

    These callbacks must accept a single string argument representing the path
    passed to the ``@import`` directive, and either return ``None`` to
    indicate the path wasn't handled by that callback (to continue with others
    or fall back on internal ``libsass`` filesystem behaviour) or a list of
    one or more tuples, each in one of three forms:

    * A 1-tuple representing an alternate path to handle internally; or,
    * A 2-tuple representing an alternate path and the content that path
      represents; or,
    * A 3-tuple representing the same as the 2-tuple with the addition of a
      "sourcemap".

    All tuple return values must be strings. As a not overly realistic
    example:

    .. code-block:: python

        def my_importer(path):
            return [(path, '#' + path + ' { color: red; }')]

        sass.compile(
                ...,
                importers=[(0, my_importer)]
            )

    Now, within the style source, attempting to ``@import 'button';`` will
    instead attach ``color: red`` as a property of an element with the
    imported name.

    .. versionadded:: 0.4.0
       Added ``source_comments`` and ``source_map_filename`` parameters.

    .. versionchanged:: 0.6.0
       The ``source_comments`` parameter becomes to take only :class:`bool`
       instead of :class:`str`.

    .. deprecated:: 0.6.0
       Values like ``'none'``, ``'line_numbers'``, and ``'map'`` for
       the ``source_comments`` parameter are deprecated.

    .. versionadded:: 0.7.0
       Added ``precision`` parameter.

    .. versionadded:: 0.7.0
       Added ``custom_functions`` parameter.

    .. versionadded:: 0.11.0
       ``source_map_filename`` no longer implies ``source_comments``.

    """
    # Determine which of the mutually exclusive input modes were passed.
    modes = set()
    for mode_name in MODES:
        if mode_name in kwargs:
            modes.add(mode_name)
    if not modes:
        raise TypeError('choose one at least in ' + and_join(MODES))
    elif len(modes) > 1:
        raise TypeError(
            and_join(modes) + ' are exclusive each other; '
            'cannot be used at a time')
    precision = kwargs.pop('precision', 5)
    output_style = kwargs.pop('output_style', 'nested')
    if not isinstance(output_style, string_types):
        raise TypeError('output_style must be a string, not ' +
                        repr(output_style))
    try:
        output_style = OUTPUT_STYLES[output_style]
    except KeyError:
        raise CompileError('{0} is unsupported output_style; choose one of {1}'
                           ''.format(output_style, and_join(OUTPUT_STYLES)))
    source_comments = kwargs.pop('source_comments', False)
    # Translate deprecated string values of source_comments to booleans,
    # emitting a DeprecationWarning for each.
    if source_comments in SOURCE_COMMENTS:
        if source_comments == 'none':
            deprecation_message = ('you can simply pass False to '
                                   "source_comments instead of 'none'")
            source_comments = False
        elif source_comments in ('line_numbers', 'default'):
            deprecation_message = ('you can simply pass True to '
                                   "source_comments instead of " +
                                   repr(source_comments))
            source_comments = True
        else:
            deprecation_message = ("you don't have to pass 'map' to "
                                   'source_comments but just need to '
                                   'specify source_map_filename')
            source_comments = False
        warnings.warn(
            "values like 'none', 'line_numbers', and 'map' for "
            'the source_comments parameter are deprecated; ' +
            deprecation_message, DeprecationWarning)
    if not isinstance(source_comments, bool):
        raise TypeError('source_comments must be bool, not ' +
                        repr(source_comments))
    # Encoding used when converting path arguments to bytes for libsass.
    fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()

    # Pop a path-like keyword argument, validating its type and that the
    # 'filename' mode is in use.
    def _get_file_arg(key):
        ret = kwargs.pop(key, None)
        if ret is not None and not isinstance(ret, string_types):
            raise TypeError('{} must be a string, not {!r}'.format(key, ret))
        elif isinstance(ret, text_type):
            ret = ret.encode(fs_encoding)
        if ret and 'filename' not in modes:
            raise CompileError(
                '{} is only available with filename= keyword argument since '
                'has to be aware of it'.format(key))
        return ret

    source_map_filename = _get_file_arg('source_map_filename')
    output_filename_hint = _get_file_arg('output_filename_hint')

    # #208: cwd is always included in include paths
    include_paths = (os.getcwd(), )
    include_paths += tuple(kwargs.pop('include_paths', ()) or ())
    include_paths = os.pathsep.join(include_paths)
    if isinstance(include_paths, text_type):
        include_paths = include_paths.encode(fs_encoding)

    # Normalise the three accepted custom_functions forms into a list of
    # SassFunction objects.
    custom_functions = kwargs.pop('custom_functions', ())
    if isinstance(custom_functions, collections.Mapping):
        custom_functions = [
            SassFunction.from_lambda(name, lambda_)
            for name, lambda_ in custom_functions.items()
        ]
    elif isinstance(custom_functions, (collections.Set, collections.Sequence)):
        custom_functions = [
            func if isinstance(func, SassFunction) else
            SassFunction.from_named_function(func) for func in custom_functions
        ]
    else:
        raise TypeError(
            'custom_functions must be one of:\n'
            '- a set/sequence of {0.__module__}.{0.__name__} objects,\n'
            '- a mapping of function name strings to lambda functions,\n'
            '- a set/sequence of named functions,\n'
            'not {1!r}'.format(SassFunction, custom_functions))

    importers = _validate_importers(kwargs.pop('importers', None))

    # Dispatch to the libsass binding matching the chosen mode.
    if 'string' in modes:
        string = kwargs.pop('string')
        if isinstance(string, text_type):
            string = string.encode('utf-8')
        indented = kwargs.pop('indented', False)
        if not isinstance(indented, bool):
            raise TypeError('indented must be bool, not ' +
                            repr(source_comments))
        _check_no_remaining_kwargs(compile, kwargs)
        s, v = _sass.compile_string(
            string,
            output_style,
            source_comments,
            include_paths,
            precision,
            custom_functions,
            indented,
            importers,
        )
        if s:
            return v.decode('utf-8')
    elif 'filename' in modes:
        filename = kwargs.pop('filename')
        if not isinstance(filename, string_types):
            raise TypeError('filename must be a string, not ' + repr(filename))
        elif not os.path.isfile(filename):
            raise IOError('{0!r} seems not a file'.format(filename))
        elif isinstance(filename, text_type):
            filename = filename.encode(fs_encoding)
        _check_no_remaining_kwargs(compile, kwargs)
        s, v, source_map = _sass.compile_filename(
            filename,
            output_style,
            source_comments,
            include_paths,
            precision,
            source_map_filename,
            custom_functions,
            importers,
            output_filename_hint,
        )
        if s:
            v = v.decode('utf-8')
            if source_map_filename:
                source_map = source_map.decode('utf-8')
                v = v, source_map
            return v
    elif 'dirname' in modes:
        try:
            search_path, output_path = kwargs.pop('dirname')
        except ValueError:
            raise ValueError('dirname must be a pair of (source_dir, '
                             'output_dir)')
        _check_no_remaining_kwargs(compile, kwargs)
        s, v = compile_dirname(
            search_path,
            output_path,
            output_style,
            source_comments,
            include_paths,
            precision,
            custom_functions,
            importers,
        )
        if s:
            return
    else:
        raise TypeError('something went wrong')
    # A falsy status from libsass means failure; v carries the error text.
    assert not s
    raise CompileError(v)
#-*- coding: gbk -*-
# encoding =gbk
import sys
#sys.setdefaultencoding('utf-8')
# Python 2 only: force the process-wide default encoding to GBK.
if sys.getdefaultencoding() != 'gbk':
    reload(sys)
    sys.setdefaultencoding('gbk')
import os
import gzip
import time


def read_gz_file(path):
    """Yield raw lines from the gzip-compressed file at *path*.

    Prints a warning and yields nothing when the path does not exist
    (best-effort behaviour, no exception raised).
    """
    if not os.path.exists(path):
        print('the path [{}] is not exist!'.format(path))
        return
    with gzip.open(path, 'rb') as gz_handle:
        for raw_line in gz_handle:
            yield raw_line


def get_gz_fileName(path):
    """Return the names of all ``.gz`` files under *path*, recursively.

    :param path: directory to search
    :returns: list of bare file names (not full paths) ending in ``.gz``;
        an empty list when *path* does not exist

    BUG FIX: the original created the result list only inside the
    ``if os.path.exists(path)`` branch, so a missing path raised
    UnboundLocalError at the ``return``.  An empty list is now returned
    instead.
    """
    gz_file_names = []
    if os.path.exists(path):
        for _root, _dirs, files in os.walk(path):
            for file_name in files:
                if os.path.splitext(file_name)[1] == '.gz':
                    gz_file_names.append(file_name)
    return gz_file_names

Beispiel #57
0
def get_best_encoding(stream):
    """Return *stream*'s encoding, or a sensible default.

    Falls back to the interpreter default when the stream declares no
    encoding, and substitutes UTF-8 for any pure-ASCII codec.
    """
    encoding = getattr(stream, "encoding", None)
    if not encoding:
        encoding = sys.getdefaultencoding()
    return "utf-8" if is_ascii_encoding(encoding) else encoding
Beispiel #58
0
 def __str__(self):
     """Return ``self.value`` encoded with stdin's encoding (or the default).

     NOTE(review): returning a byte string from ``__str__`` is Python 2
     semantics; on Python 3 this would raise a TypeError.
     """
     enc = sys.stdin.encoding or sys.getdefaultencoding()
     return self.value.encode(enc, "replace")
Beispiel #59
0
def get_filesystem_encoding():
    """Return the filesystem encoding, falling back to the interpreter default."""
    fs_encoding = sys.getfilesystemencoding()
    return fs_encoding if fs_encoding else sys.getdefaultencoding()
Beispiel #60
0
import hashlib
from pathlib import Path
from sys import getdefaultencoding
from tempfile import NamedTemporaryFile

import pytest

# Directory holding the test fixture files, next to this test module.
FIXTURES_ROOT = Path(__file__).absolute().parent.joinpath('fixtures')

SOURCE_FILE_NAME = 'data.csv'
DESTINATION_FILE_NAME = 'data.json'

# The CSV fixture is read/written in the interpreter's default encoding.
CSV_ENCODING = getdefaultencoding()

SOURCE_FILE_PATH = FIXTURES_ROOT.joinpath(SOURCE_FILE_NAME)
DESTINATION_FILE_PATH = FIXTURES_ROOT.joinpath(DESTINATION_FILE_NAME)


@pytest.fixture
def data_csv_path():
    """Path to the source CSV fixture file."""
    return SOURCE_FILE_PATH


@pytest.fixture
def data_json_path():
    """Path to the destination JSON fixture file."""
    return DESTINATION_FILE_PATH


@pytest.fixture
def named_temporary_file():
    """A fresh NamedTemporaryFile opened in text-write mode."""
    return NamedTemporaryFile(mode='wt')