Example #1
def expanduser_unicode(s):
    # expanduser() doesn't handle non-ascii characters in environment variables
    # https://gna.org/bugs/index.php?17111
    s = s.encode(sys.getfilesystemencoding())
    s = os.path.expanduser(s)
    s = s.decode(sys.getfilesystemencoding())
    return s
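
A minimal usage sketch (Python 2; the snippet's project imports os and sys at module level):

import os
import sys

# Round-trip a unicode path through the filesystem encoding so a
# non-ascii $HOME still expands correctly.
pictures = expanduser_unicode(u'~/Pictures')
assert isinstance(pictures, unicode)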
Example #2
def shooter_downloader(file_path):
    """ see https://docs.google.com/document/d/1ufdzy6jbornkXxsD-OGl3kgWa4P9WO5NZb6_QYZiGI0/preview
    """
    resp = request(SHOOTER_URL,
                   data={'filehash': shooter_hash(file_path), 'pathinfo': basename(file_path), 'format': 'json'})
    try:
        r_json = json_loads(resp)
    except:
        print '射手网没有找到字幕'.encode(getfilesystemencoding())  # "No subtitles found on shooter.cn"
        return False
    else:
        f_name, file_extension = splitext(file_path)
        result = []
        for info in r_json:
            for f_info in info['Files']:
                # skip subtitles in idx/sub format
                if f_info['Ext'] not in ('sub', 'idx'):
                    result.append((f_info['Link'], f_info['Ext']))
        if len(result) < 1:
            print '射手网没有找到字幕'.encode(getfilesystemencoding())  # "No subtitles found on shooter.cn"
            return False
        elif len(result) == 1:
            urlretrieve(result[0][0], filename='{}.{}'.format(f_name, result[0][1]))
            print '字幕下载完成'.encode(getfilesystemencoding())  # "Subtitle downloaded"
        else:
            for idx, value in enumerate(result):
                urlretrieve(value[0], filename='{}_{}.{}'.format(f_name, idx + 1, value[1]))
                print '第{}个字幕下载完成'.format(idx + 1).encode(getfilesystemencoding())  # "Subtitle #{} downloaded"
        return True
Example #3
 def test_read_bytes_name(self):
     history.append('fred')
     history.append('wilma')
     history.write_file(bytes('my_history', sys.getfilesystemencoding()), raise_exc=True)
     history.clear()
     history.read_file(bytes('my_history', sys.getfilesystemencoding()), raise_exc=True)
     self.assertEqual(len(history), 2)
Example #4
	def show(self):

		try:
			# Initialize/Refresh the view
			if self.refreshitems():
				if self.Show() < 0: return False
			#self.Refresh()
		except:
			traceback.print_exc()

		# Attempt to open the view

		if self.cmd_Items_SaveAs == None:
			# "导出全部文件..." = "Export all files..."
			self.cmd_Items_SaveAs = self.AddCommand("导出全部文件...".decode('utf-8').encode(sys.getfilesystemencoding()), flags = idaapi.CHOOSER_POPUP_MENU | idaapi.CHOOSER_NO_SELECTION, icon=139)

		if self.cmd_Item_SaveAs == None:
			# "导出文件..." = "Export file..."
			self.cmd_Item_SaveAs = self.AddCommand("导出文件...".decode('utf-8').encode(sys.getfilesystemencoding()), flags = idaapi.CHOOSER_POPUP_MENU, icon=139)

		if self.cmd_Item_ReplaceBy == None:
			# "替换文件..." = "Replace file..."
			self.cmd_Item_ReplaceBy = self.AddCommand("替换文件...".decode('utf-8').encode(sys.getfilesystemencoding()), flags = idaapi.CHOOSER_POPUP_MENU, icon=139)

		return True
Example #5
    def _get_build_extension(self, extension=None, lib_dir=None, temp_dir=None,
                             pgo_step_name=None, _build_ext=build_ext):
        self._clear_distutils_mkpath_cache()
        dist = Distribution()
        config_files = dist.find_config_files()
        try:
            config_files.remove('setup.cfg')
        except ValueError:
            pass
        dist.parse_config_files(config_files)

        if not temp_dir:
            temp_dir = lib_dir
        add_pgo_flags = self._add_pgo_flags

        if pgo_step_name:
            base_build_ext = _build_ext
            class _build_ext(_build_ext):
                def build_extensions(self):
                    add_pgo_flags(self, pgo_step_name, temp_dir)
                    base_build_ext.build_extensions(self)

        build_extension = _build_ext(dist)
        build_extension.finalize_options()
        if temp_dir:
            temp_dir = py3compat.cast_bytes_py2(temp_dir, encoding=sys.getfilesystemencoding())
            build_extension.build_temp = temp_dir
        if lib_dir:
            lib_dir = py3compat.cast_bytes_py2(lib_dir, encoding=sys.getfilesystemencoding())
            build_extension.build_lib = lib_dir
        if extension is not None:
            build_extension.extensions = [extension]
        return build_extension
Example #6
File: k4mutils.py Project: RudiKe/bin
def getKindleInfoFiles(kInfoFiles):
    # first search for current .kindle-info files
    home = os.getenv('HOME')
    cmdline = 'find "' + home + '/Library/Application Support" -name ".kindle-info"'
    cmdline = cmdline.encode(sys.getfilesystemencoding())
    p1 = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
    out1, out2 = p1.communicate()
    reslst = out1.split('\n')
    kinfopath = 'NONE'
    found = False
    for resline in reslst:
        if os.path.isfile(resline):
            kInfoFiles.append(resline)
            found = True
    # add any .kinf files 
    cmdline = 'find "' + home + '/Library/Application Support" -name ".rainier*-kinf"'
    cmdline = cmdline.encode(sys.getfilesystemencoding())
    p1 = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
    out1, out2 = p1.communicate()
    reslst = out1.split('\n')
    for resline in reslst:
        if os.path.isfile(resline):
            kInfoFiles.append(resline)
            found = True
    if not found:
        print('No kindle-info files have been found.')
    return kInfoFiles
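
A hedged usage sketch (OS X only, since the function shells out to `find` under ~/Library/Application Support):

kinfo_files = getKindleInfoFiles([])
for path in kinfo_files:
    print(path)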
Example #7
File: RHVoice.py Project: nano13/nvcli
 def __init__(self):
     self.__lib=ctypes.CDLL(lib_path.encode(sys.getfilesystemencoding()))
     self.__lib.RHVoice_create_voice.argtypes=(c_char_p,)
     self.__lib.RHVoice_create_voice.restype=POINTER(cst_voice)
     self.__lib.RHVoice_delete_voice.argtypes=(POINTER(cst_voice),)
     self.__lib.RHVoice_synth_text.argtypes=(c_char_p,POINTER(cst_voice))
     self.__lib.RHVoice_load_user_dict.argtypes=(POINTER(cst_voice),c_char_p)
     self.__lib.new_audio_streaming_info.restype=POINTER(cst_audio_streaming_info)
     self.__lib.audio_streaming_info_val.argtypes=(POINTER(cst_audio_streaming_info),)
     self.__lib.audio_streaming_info_val.restype=c_void_p
     self.__lib.uttfunc_val.argtypes=(cst_uttfunc,)
     self.__lib.uttfunc_val.restype=c_void_p
     self.__lib.feat_set_int.argtypes=(c_void_p,c_char_p,c_int)
     self.__lib.feat_set_float.argtypes=(c_void_p,c_char_p,c_float)
     self.__lib.feat_set_string.argtypes=(c_void_p,c_char_p,c_char_p)
     self.__lib.feat_set.argtypes=(c_void_p,c_char_p,c_void_p)
     self.__voice=Voice(self.__lib)
     if os.path.isfile(userdict_path):
         self.__lib.RHVoice_load_user_dict(self.__voice,userdict_path.encode(sys.getfilesystemencoding()))
     self.__player=nvwave.WavePlayer(channels=1,samplesPerSec=16000,bitsPerSample=16,outputDevice=config.conf["speech"]["outputDevice"])
     self.__silence_flag=threading.Event()
     self.__audio_callback=AudioCallback(self.__lib,self.__player,self.__silence_flag)
     self.__tts_queue=Queue.Queue()
     self.__tts_thread=TTSThread(self.__lib,self.__tts_queue,self.__voice,self.__audio_callback,self.__silence_flag)
     self.__native_rate=1.0
     self.__native_pitch=1.0
     self.__native_volume=1.0
     self._availableVariants=[VoiceInfo("pseudo-english",u"псевдо-английский"),VoiceInfo("russian",u"русский")]
     self.__variant="pseudo-english"
     self.__tts_thread.start()
Example #8
File: fts.py Project: abarnert/scandir
def _make_path(ent):
    if isinstance(ent, Entry):
        ent = ent.ftsent
    if ent.fts_path:
        return ffi.string(ent.fts_path).decode(sys.getfilesystemencoding())
    else:
        return ffi.string(ent.fts_name).decode(sys.getfilesystemencoding())
Example #9
    def on_run(self):
        target_dir = self.target_dir.get().decode(sys.getfilesystemencoding())
        if len(target_dir) == 0:
            tkMessageBox.showinfo(title="No target directory",
                                  message="No target directory selected.")
            return
        input_paths = self.input_dirs.get(0, Tkinter.END)
        input_paths = [ip.decode(sys.getfilesystemencoding())
                       for ip in input_paths]
        no_length_patch = bool(self.skip_ogg_patch.get())

        # Disable GUI
        self.disable()

        logger = ThreadQueueLogger()

        # To avoid locking the GUI, run execution in another thread.
        thread = threading.Thread(
            target=r21buddy.run,
            args=(target_dir, input_paths),
            kwargs={"length_patch": (not no_length_patch), "verbose": True,
                    "ext_logger": logger})
        thread.start()

        # Initiate a polling function which will update until the
        # thread finishes.
        self._on_run(thread, logger)
Example #10
def _main(argv):
    import optparse

    usage = "usage: %prog [options] <file1 file2 ...>\n<stdin> will be used as input source if no file specified."
    
    parser = optparse.OptionParser(usage=usage, version="%%prog %s @ Copyright %s" % (__version__, __copyright__))
    parser.add_option('-t', '--target-language', metavar='zh-CN',
                      help='specify target language to translate the source text into')
    parser.add_option('-s', '--source-language', default='', metavar='en',
                      help='specify source language, if not provided it will identify the source language automatically')
    parser.add_option('-i', '--input-encoding', default=sys.getfilesystemencoding(), metavar='utf-8',
                      help='specify input encoding, defaults to current console system encoding')
    parser.add_option('-o', '--output-encoding', default=sys.getfilesystemencoding(), metavar='utf-8',
                      help='specify output encoding, defaults to current console system encoding')

    options, args = parser.parse_args(argv[1:])
    
    if not options.target_language:
        print('Error: missing target language!')
        parser.print_help()
        return
    
    gs = Goslate()
    import fileinput
    # inputs = fileinput.input(args, mode='rU', openhook=fileinput.hook_encoded(options.input_encoding))
    inputs = fileinput.input(args, mode='rb')
    inputs = (i.decode(options.input_encoding) for i in inputs)
    outputs = gs.translate(inputs, options.target_language, options.source_language)
    for i in outputs:
        sys.stdout.write((i+u'\n').encode(options.output_encoding))
        sys.stdout.flush()
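
A hedged sketch of driving _main directly, assuming the surrounding module imports sys and defines Goslate; the input file name is hypothetical:

_main(['goslate', '-t', 'zh-CN', 'notes.txt'])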
Example #11
    def __init__(self, arg_list, env, listener,
            # "path" is an option in build systems
            path='',
            # "shell" is an options in build systems
            shell=False):

        self.listener = listener
        self.killed = False

        self.start_time = time.time()

        # Set temporary PATH to locate executable in arg_list
        if path:
            old_path = os.environ["PATH"]
            # The user decides in the build system whether he wants to append $PATH
            # or tuck it at the front: "$PATH;C:\\new\\path", "C:\\new\\path;$PATH"
            os.environ["PATH"] = os.path.expandvars(path).encode(sys.getfilesystemencoding())

        proc_env = os.environ.copy()
        proc_env.update(env)
        for k, v in proc_env.iteritems():
            proc_env[k] = os.path.expandvars(v).encode(sys.getfilesystemencoding())

        self.proc = subprocess.Popen(arg_list, env=proc_env, shell=shell)

        if path:
            os.environ["PATH"] = old_path
Example #12
File: rrp.py Project: skelsec/impacket
def packValue(valueType, value):
    if valueType == REG_DWORD:
        retData = pack('<L', value)
    elif valueType == REG_DWORD_BIG_ENDIAN:
        retData = pack('>L', value)
    elif valueType == REG_EXPAND_SZ:
        try:
            retData = value.encode('utf-16le')
        except UnicodeDecodeError:
            import sys
            retData = value.decode(sys.getfilesystemencoding()).encode('utf-16le')
    elif valueType == REG_MULTI_SZ:
        try:
            retData = value.encode('utf-16le')
        except UnicodeDecodeError:
            import sys
            retData = value.decode(sys.getfilesystemencoding()).encode('utf-16le')
    elif valueType == REG_QWORD:
        retData = pack('<Q', value)
    elif valueType == REG_QWORD_LITTLE_ENDIAN:
        retData = pack('>Q', value)
    elif valueType == REG_SZ:
        try:
            retData = value.encode('utf-16le')
        except UnicodeDecodeError:
            import sys
            retData = value.decode(sys.getfilesystemencoding()).encode('utf-16le')
    else:
        retData = value

    return retData
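
A hedged usage sketch; it assumes the module-level `from struct import pack` and the REG_* value-type constants defined alongside packValue in impacket's rrp module:

print repr(packValue(REG_DWORD, 1))   # '\x01\x00\x00\x00'
print repr(packValue(REG_SZ, 'abc'))  # 'a\x00b\x00c\x00' (UTF-16LE)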
Example #13
    def _run(cmds):
        #~ print cmds
        cmdline = str( [" ".join(cmds)] ) # to output easily (with strange chars)
        try:
            cmds = [i.encode(sys.getfilesystemencoding()) for i in cmds]
        except:
            raise CommandException( cmdline +"\n encoding trouble")

        p = Popen(cmds, shell=False,stdout=PIPE,stderr=PIPE)
        time.sleep(0.01)    # to avoid "IOError: [Errno 4] Interrupted system call"
        out = string.join(p.stdout.readlines() ).strip()
        outerr = string.join(p.stderr.readlines() ).strip()

        if "exiftran" in cmdline:
            if "processing" in outerr:
                # exiftran prints its progress to stderr ;-(
                outerr=""

        if outerr:
            raise CommandException( cmdline +"\n OUTPUT ERROR:"+outerr)
        else:
            try:
                out = out.decode("utf_8") # recupere les infos en UTF_8
            except:
                try:
                    out = out.decode("latin_1")  # recupere les anciens infos (en latin_1)
                except UnicodeDecodeError:
                    try:
                        out = out.decode(sys.getfilesystemencoding())
                    except UnicodeDecodeError:
                        raise CommandException( cmdline +"\n decoding trouble")

            return out #unicode
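
A hedged usage sketch (Python 2; assumes the surrounding module imports Popen, PIPE, string, time and sys, and defines CommandException); the command is hypothetical:

listing = _run([u'ls', u'-l'])  # returns the command's stdout as unicode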
Example #14
def expanduser_unicode(s):
    """Expands a ~/ on the front of a unicode path, where meaningful.

    :param s: path to expand, coercible to unicode
    :returns: The expanded path
    :rtype: unicode

    This doesn't do anything on the Windows platform other than coerce
    its argument to unicode. On other platforms, it converts a "~"
    component on the front of a relative path to the user's absolute
    home, like os.expanduser().

    Certain workarounds for OS and filesystem encoding issues are
    implemented here too.

    """
    s = unicode(s)
    # The sys.getfilesystemencoding() on Win32 (mbcs) is for encode
    # only, and isn't roundtrippable. Luckily ~ is not meaningful on
    # Windows, and MyPaint uses a better default for the scrap prefix on
    # the Windows platform anyway.
    if sys.platform == "win32":
        return s
    # expanduser() doesn't handle non-ascii characters in environment variables
    # https://gna.org/bugs/index.php?17111
    s = s.encode(sys.getfilesystemencoding())
    s = os.path.expanduser(s)
    s = s.decode(sys.getfilesystemencoding())
    return s
Example #15
File: files.py Project: 08haozi/uliweb
def encode_filename(filename, from_encoding='utf-8', to_encoding=None):
    """
    >>> print encode_filename('\xe4\xb8\xad\xe5\x9b\xbd.doc')
    \xd6\xd0\xb9\xfa.doc
    >>> f = unicode('\xe4\xb8\xad\xe5\x9b\xbd.doc', 'utf-8')
    >>> print encode_filename(f)
    \xd6\xd0\xb9\xfa.doc
    >>> print encode_filename(f.encode('gbk'), 'gbk')
    \xd6\xd0\xb9\xfa.doc
    >>> print encode_filename(f, 'gbk', 'utf-8')
    \xe4\xb8\xad\xe5\x9b\xbd.doc
    >>> print encode_filename('\xe4\xb8\xad\xe5\x9b\xbd.doc', 'utf-8', 'gbk')
    \xd6\xd0\xb9\xfa.doc
    
    """
    import sys
    to_encoding = to_encoding or sys.getfilesystemencoding()
    from_encoding = from_encoding or sys.getfilesystemencoding()
    if not isinstance(filename, unicode):
        try:
            f = unicode(filename, from_encoding)
        except UnicodeDecodeError:
            try:
                f = unicode(filename, 'utf-8')
            except UnicodeDecodeError:
                raise Exception, "Unknown encoding of the filename %s" % filename
        filename = f
    if to_encoding:
        return filename.encode(to_encoding)
    else:
        return filename
Example #16
    def stage(self, fs_paths):
        """Stage a set of paths.

        :param fs_paths: List of paths, relative to the repository path
        """

        root_path_bytes = self.path.encode(sys.getfilesystemencoding())

        if not isinstance(fs_paths, list):
            fs_paths = [fs_paths]
        from dulwich.index import (
            blob_from_path_and_stat,
            index_entry_from_stat,
            _fs_to_tree_path,
            )
        index = self.open_index()
        for fs_path in fs_paths:
            if not isinstance(fs_path, bytes):
                fs_path = fs_path.encode(sys.getfilesystemencoding())
            tree_path = _fs_to_tree_path(fs_path)
            full_path = os.path.join(root_path_bytes, fs_path)
            try:
                st = os.lstat(full_path)
            except OSError:
                # File no longer exists
                try:
                    del index[tree_path]
                except KeyError:
                    pass  # already removed
            else:
                blob = blob_from_path_and_stat(full_path, st)
                self.object_store.add_object(blob)
                index[tree_path] = index_entry_from_stat(st, blob.id, 0)
        index.write()
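
A hedged usage sketch with dulwich's Repo object (repository path and file name hypothetical):

from dulwich.repo import Repo

repo = Repo('.')
repo.stage(['README.md'])  # non-bytes paths get encoded with sys.getfilesystemencoding()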
Example #17
def DownloadSong(artist, title, songsdir='Top100'):
    """
    arguments:
        artist (unicode)
        title (unicode)
        songsdir (unicode)
    """
    filename = u'%s-%s.mp3' %(artist, title)
    if title == '':
        return
    if artist == '':
        filename = u'%s.mp3' %title
    filename = filename.encode(sys.getfilesystemencoding())
    songsdir = songsdir.encode(sys.getfilesystemencoding())

    if os.path.exists(os.path.join(songsdir, filename)) or os.path.exists(filename):
        print u"已经成功下载《%s - %s》"%(artist, title)  # "Already downloaded <artist - title>"
        return

    print u"准备下载《%s - %s》..." %(artist, title)  # "Preparing to download <artist - title>..."
    print u'正在选取最快的URL:'  # "Picking the fastest URL:"
    #fakeurls = __getFakeURLs(artist, title)
    url = GetBestUrl(artist, title)
    print url.decode('gbk', 'ignore')
    try:
        MyHttpGet(url, filename, 3)
    except URLUnreachable:
        print u"Sorry, 目前并没有为(%s - %s)找到合适的下载资源,\n您可以手动下载或稍候再试。" %(artist, title)  # "No suitable source found for (artist - title); download manually or try again later."
    except KeyboardInterrupt:
        print u'Exiting...'
Example #18
File: crash.py Project: ndparker/wtf
    def __init__(self, config, opts, args):
        """ :See: `wtf.services.ServiceInterface.__init__` """
        section = config.crash
        self._debug = bool(section('debug', False))
        if 'display' in section:
            fname, status = section.display.template, section.display.status
            fp = open(fname, 'rb')
            try:
                page = fp.read()
                mtime = _os.fstat(fp.fileno()).st_mtime
            finally:
                fp.close()
            self._display = page, status, fname, mtime
        if 'dump' in section:
            self._perms = int(section.dump('perms', '0644'), 8)
            self._dumpdir = _os.path.join(config.ROOT, unicode(
                section.dump.directory
            ).encode(_sys.getfilesystemencoding()))
            try:
                self._dumpdir_unicode = self._dumpdir.decode(
                    _sys.getfilesystemencoding()
                )
            except UnicodeError:
                self._dumpdir_unicode = self._dumpdir.decode('latin-1')

            # check write access
            fp, name = self._make_dumpfile()
            try:
                fp.write("!")
            finally:
                try:
                    fp.close()
                finally:
                    _os.unlink(name)
Example #19
def opensesame_folder():

	"""
	Determines the folder that contains the OpenSesame executable. This is only
	applicable under Windows.

	Returns:
	The OpenSesame folder or None if the os is not Windows.
	"""
	# Determines the directory name of the script or the directory name
	# of the executable after being packaged with py2exe. This has to be
	# done so the child process can find all relevant modules too.
	# See http://www.py2exe.org/index.cgi/HowToDetermineIfRunningFromExe
	#
	# There are two scenarios: Either OpenSesame is run from a frozen state,
	# in which case the OpenSesame folder is the folder containing the
	# executable, or OpenSesame is run from source, in which case we go to
	# the OpenSesame folder by going two levels up from the __file__ folder.
	if platform.system() == u'Darwin':
		return os.getcwd()
	elif platform.system() == u'Windows':
		import imp
		if (hasattr(sys, u'frozen') or hasattr(sys, u'importers') or \
			imp.is_frozen(u'__main__')):
			path = safe_decode(os.path.dirname(sys.executable),
				enc=sys.getfilesystemencoding())
		else:
			# To get the opensesame folder, simply jump two levels up
			path = safe_decode(os.path.dirname(__file__),
				enc=sys.getfilesystemencoding())
			path = os.path.normpath(os.path.join(path, u'..'))
		return path
	else:
		return None
Example #20
    def requestAvatarId(self, credentials):
        # We get bytes, but the Py3 pwd module uses str. So attempt to decode
        # it using the same method that CPython does for the file on disk.
        if _PY3:
            username = credentials.username.decode(sys.getfilesystemencoding())
            password = credentials.password.decode(sys.getfilesystemencoding())
        else:
            username = credentials.username
            password = credentials.password

        for func in self._getByNameFunctions:
            try:
                pwnam = func(username)
            except KeyError:
                return defer.fail(UnauthorizedLogin("invalid username"))
            else:
                if pwnam is not None:
                    crypted = pwnam[1]
                    if crypted == '':
                        continue

                    if verifyCryptedPassword(crypted, password):
                        return defer.succeed(credentials.username)
        # fallback
        return defer.fail(UnauthorizedLogin("unable to verify password"))
def encodeASCII(string, language=None): #from Unicodize and plex scanner and other sources
  if string=="": return ""
  ranges = [ {"from": ord(u"\u3300"), "to": ord(u"\u33ff")}, {"from": ord(u"\ufe30"), "to": ord(u"\ufe4f")}, {"from": ord(u"\uf900"), "to": ord(u"\ufaff")},  # compatibility ideographs
             {"from": ord(u"\u30a0"), "to": ord(u"\u30ff")}, {"from": ord(u"\u2e80"), "to": ord(u"\u2eff")},                                                  # Japanese Kana    # cjk radicals supplement
             {"from": ord(u"\u4e00"), "to": ord(u"\u9fff")}, {"from": ord(u"\u3400"), "to": ord(u"\u4dbf")}]                                                  # windows: TypeError: ord() expected a character, but string of length 2 found #{"from": ord(u"\U00020000"), "to": ord(u"\U0002a6df")}, #{"from": ord(u"\U0002a700"), "to": ord(u"\U0002b73f")}, #{"from": ord(u"\U0002b740"), "to": ord(u"\U0002b81f")}, #{"from": ord(u"\U0002b820"), "to": ord(u"\U0002ceaf")}, # included as of Unicode 8.0                             #{"from": ord(u"\U0002F800"), "to": ord(u"\U0002fa1f")}  # compatibility ideographs
  encodings, encoding = ['iso8859-1', 'utf-16', 'utf-16be', 'utf-8'], ord(string[0])                                                                          #
  if 0 <= encoding < len(encodings):  string = string[1:].decode('cp949') if encoding == 0 and language == 'ko' else string[1:].decode(encodings[encoding])   # If we're dealing with a particular language, we might want to try another code page.
  if sys.getdefaultencoding() not in encodings:
    try:     string = string.decode(sys.getdefaultencoding())
    except:  pass
  if not sys.getfilesystemencoding()==sys.getdefaultencoding():
    try:     string = string.decode(sys.getfilesystemencoding())
    except:  pass
  string = string.strip('\0')
  try:       string = unicodedata.normalize('NFKD', string)    # Unicode to ascii conversion to correct most characters automatically
  except:    pass
  try:       string = re.sub(RE_UNICODE_CONTROL, '', string)   # Strip control characters.
  except:    pass
  try:       string = string.encode('ascii', 'replace')        # Encode into Ascii
  except:    pass
  original_string, string, i = string, list(string), 0
  while i < len(string):                                       ### loop through unicode and replace special chars with spaces then map if found ###
    if ord(string[i])<128:  i = i+1
    else: #non ascii char
      char, char2, char3, char_len = 0, "", [], unicodeLen(string[i])
      for x in range(0, char_len):
        char = 256*char + ord(string[i+x]); char2 += string[i+x]; char3.append(string[i+x])
        if not x==0: string[i] += string[i+x]; string[i+x]=''
      try:    asian_language = any([mapping["from"] <= ord("".join(char3).decode('utf8')) <= mapping["to"] for mapping in ranges])
      except: asian_language = False
      if char in CHARACTERS_MAP:  string[i]=CHARACTERS_MAP.get( char )
      elif not asian_language:    Log("*Character missing in CHARACTERS_MAP: %d:'%s'  , #'%s' %s, string: '%s'" % (char, char2, char2, char3, original_string))
      i += char_len
  return ''.join(string)
Example #22
def run():
    global mw
    from anki.utils import isWin, isMac

    # on osx we'll need to add the qt plugins to the search path
    if isMac and getattr(sys, 'frozen', None):
        rd = os.path.abspath(moduleDir + "/../../..")
        QCoreApplication.setLibraryPaths([rd])

    # create the app
    app = AnkiApp(sys.argv)
    QCoreApplication.setApplicationName("Anki")
    if app.secondInstance():
        # we've signaled the primary instance, so we should close
        return

    # parse args
    opts, args = parseArgs(sys.argv)
    opts.base = unicode(opts.base or "", sys.getfilesystemencoding())
    opts.profile = unicode(opts.profile or "", sys.getfilesystemencoding())

    # profile manager
    from aqt.profiles import ProfileManager
    pm = ProfileManager(opts.base, opts.profile)

    # i18n
    setupLang(pm, app, opts.lang)

    # remaining pm init
    pm.ensureProfile()

    # load the main window
    import aqt.main
    mw = aqt.main.AnkiQt(app, pm, args)
    app.exec_()
Example #23
def home():
	if name == 'nt':
		return unicode(environ['USERPROFILE'], getfilesystemencoding())
	elif platform == "darwin":	
		return unicode(environ['HOME'], getfilesystemencoding())
	else:
		return unicode(environ['HOME'], getfilesystemencoding())
Example #24
	def pack(self, dst, src, src_path):
		chrome_path = self.get_chrome_path()
		if chrome_path is not None:
			extension_path = os.path.abspath(src)
			certificate_path = os.path.abspath(os.path.join(extension_path, '../', '../', 'certificates'))
			if not os.path.exists(certificate_path):
				os.makedirs(certificate_path)
			certificate_path = os.path.join(certificate_path, 'chrome.pem')

			cmd = chrome_path + ' --pack-extension="' + extension_path + '"'
			if os.path.isfile(certificate_path):
				cmd += ' --pack-extension-key="' + certificate_path + '"'
			cmd += ' --no-message-box'
			os.system(cmd.encode(sys.getfilesystemencoding()))

			extension_dst = os.path.abspath(os.path.join(extension_path, '../', 'chrome.crx'))
			if not os.path.isfile(extension_dst):
				cmd = '"' + chrome_path + '"' + ' --pack-extension="' + extension_path + '"'
				if os.path.isfile(certificate_path):
					cmd += ' --pack-extension-key="' + certificate_path + '"'
				cmd += ' --no-message-box'
				subprocess.call(cmd.encode(sys.getfilesystemencoding()))
			try:
				shutil.move(os.path.abspath(os.path.join(extension_path, '../', 'chrome.pem')), certificate_path)
			except:
				pass
			shutil.move(extension_dst, os.path.join(dst, self.get_full_package_name(self._info)))
		else:
			kango.log('Chrome/Chromium is not installed, can\'t pack chrome extension.')
Example #25
File: data.py Project: pycom/EricShort
    def read_fileobj(self, file_obj):
        """Read the coverage data from the given file object.

        Should only be used on an empty CoverageData object.

        """
        data = self._read_raw_data(file_obj)

        self._lines = self._arcs = None

        if 'lines' in data:
            self._lines = dict(
                (fname.encode(sys.getfilesystemencoding()), linenos)
                for fname, linenos in iitems(data['lines'])
            )

        if 'arcs' in data:
            self._arcs = dict(
                (fname.encode(sys.getfilesystemencoding()),
                    [tuple(pair) for pair in arcs])
                for fname, arcs in iitems(data['arcs'])
            )
        self._file_tracers = data.get('file_tracers', {})
        self._runs = data.get('runs', [])

        self._validate()
Example #26
 def refresh_cb(self, *args):
     self.popup_menu.popdown()
     mm = Gtk.Menu()
     oo = Gtk.Menu()
     for i in os.listdir(mount_prefix):
         mp = os.path.join(mount_prefix,i)
         if (os.path.ismount(mp)): 
             j = Gtk.MenuItem(i.decode(sys.getfilesystemencoding())) 
             o = Gtk.MenuItem(i.decode(sys.getfilesystemencoding())) 
             j.connect('activate', self.umount_cb, i)
             o.connect('activate', lambda a, mp=mp: run_file_man(mp))  # bind mp per iteration, not at call time
             mm.add(j)
             oo.add(o) 
         mm.add(Gtk.SeparatorMenuItem())
         oo.add(Gtk.SeparatorMenuItem())
     i = Gtk.ImageMenuItem(Gtk.STOCK_REFRESH)
     i.connect('activate', self.refresh_cb)
     mm.add(i)
     i = Gtk.ImageMenuItem(Gtk.STOCK_REFRESH)
     i.connect('activate', self.refresh_cb)
     oo.add(i)
     mounted_menu = mm
     open_menu = oo
     g = self.open_menu_item.get_submenu()
     s = self.umount_menu_item.get_submenu()
     self.umount_menu_item.set_submenu(mm)
     self.open_menu_item.set_submenu(oo)
     del s, g
Example #27
def main():
    args = sys.argv
    cur_sys = system()
    file_name = GLOBALS["db_name"]+"-"+str(options.port)+".log"
    # if cur_sys == "Darwin":
    #     f = "/Users/"+os.getlogin()+"/Desktop/"+ file_name
    # elif cur_sys == "Linux":
    #     f = os.getcwd() + "/" + file_name
    # else:
    #     raise NotImplementedError
    # args.append("--log_file_prefix=" + f)
    # logging.basicConfig(filename=f, level=logging.DEBUG)

    tornado.options.parse_command_line()
    applicaton = Application()
    http_server = tornado.httpserver.HTTPServer(applicaton, xheaders=True)

    http_server.listen(options.port)
    print("="*50)
    print("initializing program with port : ", options.port)
    print("="*50)
    print("my ip is : ", socket.gethostbyname(socket.gethostname()))
    print("="*50)
    print("File system DEFAULT Encoding-type is : ", sys.getdefaultencoding())
    print("File system Encoding-type is : ", sys.getfilesystemencoding())
    print("="*50)
    logging.info("File system DEFAULT Encoding-type is : " + str(sys.getdefaultencoding()))
    logging.info("File system Encoding-type is : " +  str(sys.getfilesystemencoding()))

    ioloop = tornado.ioloop.IOLoop.instance()
    ioloop.start()
Example #28
    def execute(self, content):
        command = self.get_link_command()
        if not command:
            sublime.error_message("Could not get link opener command.\nPlatform not yet supported.")
            return None

        content = content.encode(sys.getfilesystemencoding())
        cmd = command + [content]
        arg_list_wrapper = self.settings.get("orgmode.open_link.resolver.abstract.arg_list_wrapper", [])
        if arg_list_wrapper:  # NOTE never use shell=True below.
            cmd = arg_list_wrapper + [" ".join(cmd)]
            source_filename = '"' + self.view.file_name() + '"'
            cmd += [source_filename]
            # TODO: hack here (commented out)
            # cmd += ['--origin', source_filename, '--quiet']
        print "*****"
        print repr(content), content
        print repr(cmd)
        print cmd
        sublime.status_message("Executing: %s" % cmd)
        process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)

        stdout, stderr = process.communicate()
        if stdout:
            stdout = unicode(stdout, sys.getfilesystemencoding())
            sublime.status_message(stdout)
        if stderr:
            stderr = unicode(stderr, sys.getfilesystemencoding())
            sublime.error_message(stderr)
Example #29
    def _setup_unix(self, bind, basedir=None):
        """
        Set up a UNIX domain socket

        :Parameters:
         - `bind`: Bind parameter accessor (``match.group``)
         - `basedir`: Basedir for relative paths

        :Types:
         - `bind`: ``callable``
         - `basedir`: ``str``
        """
        if _AF_UNIX is None:
            raise ConfigurationError("UNIX domain sockets are not available")

        obind = repr(bind(0))
        if bind(u'perm'):
            try:
                socket_perm = int(bind('perm'), 8)
            except (TypeError, ValueError):
                raise ConfigurationError("Invalid permission")
            umask = 0777 & ~socket_perm
        else:
            umask = None
        basedir = basedir or _os.getcwd()
        if not isinstance(basedir, unicode):
            basedir = basedir.decode(_sys.getfilesystemencoding())
        path = _os.path.normpath(_os.path.join(
            basedir, bind(u'path')
        )).encode(_sys.getfilesystemencoding())
        socket = _socket.socket(_AF_UNIX, _socket.SOCK_STREAM)
        self._sockets.append(UnixSocket(socket, obind, path, umask))
Example #30
File: data.py Project: pycom/EricShort
    def write_fileobj(self, file_obj):
        """Write the coverage data to `file_obj`."""

        # Create the file data.
        file_data = {}

        if self._has_arcs():
            file_data['arcs'] = dict(
                (fname.decode(sys.getfilesystemencoding()),
                    [tuple(pair) for pair in arcs])
                for fname, arcs in iitems(self._arcs)
            )

        if self._has_lines():
            file_data['lines'] = dict(
                (fname.decode(sys.getfilesystemencoding()), linenos)
                for fname, linenos in iitems(self._lines)
            )

        if self._file_tracers:
            file_data['file_tracers'] = self._file_tracers

        if self._runs:
            file_data['runs'] = self._runs

        # Write the data to the file.
        file_obj.write(self._GO_AWAY)
        json.dump(file_data, file_obj)
Example #31
    def test_issue5604(self):
        # Test cannot cover imp.load_compiled function.
        # Martin von Loewis notes that a shared library cannot have non-ascii
        # characters in its name, because the init_xxx function could not be
        # compiled, so the issue never happens for dynamic modules.
        # But the sources were modified to follow the generic way of
        # processing paths.

        # the return encoding could be uppercase or None
        fs_encoding = sys.getfilesystemencoding()

        # covers utf-8 and Windows ANSI code pages
        # one non-space symbol from every page
        # (http://en.wikipedia.org/wiki/Code_page)
        known_locales = {
            'utf-8': b'\xc3\xa4',
            'cp1250': b'\x8C',
            'cp1251': b'\xc0',
            'cp1252': b'\xc0',
            'cp1253': b'\xc1',
            'cp1254': b'\xc0',
            'cp1255': b'\xe0',
            'cp1256': b'\xe0',
            'cp1257': b'\xc0',
            'cp1258': b'\xc0',
        }

        if sys.platform == 'darwin':
            self.assertEqual(fs_encoding, 'utf-8')
            # Mac OS X uses the Normal Form D decomposition
            # http://developer.apple.com/mac/library/qa/qa2001/qa1173.html
            special_char = b'a\xcc\x88'
        else:
            special_char = known_locales.get(fs_encoding)

        if not special_char:
            self.skipTest(
                "can't run this test with %s as filesystem encoding" %
                fs_encoding)
        decoded_char = special_char.decode(fs_encoding)
        temp_mod_name = 'test_imp_helper_' + decoded_char
        test_package_name = 'test_imp_helper_package_' + decoded_char
        init_file_name = os.path.join(test_package_name, '__init__.py')
        try:
            # if the curdir is not in sys.path the test fails when run with
            # ./python ./Lib/test/regrtest.py test_imp
            sys.path.insert(0, os.curdir)
            with open(temp_mod_name + '.py', 'w') as file:
                file.write('a = 1\n')
            file, filename, info = imp.find_module(temp_mod_name)
            with file:
                self.assertIsNotNone(file)
                self.assertTrue(filename[:-3].endswith(temp_mod_name))
                self.assertEqual(info[0], '.py')
                self.assertEqual(info[1], 'r')
                self.assertEqual(info[2], imp.PY_SOURCE)

                mod = imp.load_module(temp_mod_name, file, filename, info)
                self.assertEqual(mod.a, 1)

            with warnings.catch_warnings():
                warnings.simplefilter('ignore')
                mod = imp.load_source(temp_mod_name, temp_mod_name + '.py')
            self.assertEqual(mod.a, 1)

            with warnings.catch_warnings():
                warnings.simplefilter('ignore')
                if not sys.dont_write_bytecode:
                    mod = imp.load_compiled(
                        temp_mod_name,
                        imp.cache_from_source(temp_mod_name + '.py'))
            self.assertEqual(mod.a, 1)

            if not os.path.exists(test_package_name):
                os.mkdir(test_package_name)
            with open(init_file_name, 'w') as file:
                file.write('b = 2\n')
            with warnings.catch_warnings():
                warnings.simplefilter('ignore')
                package = imp.load_package(test_package_name,
                                           test_package_name)
            self.assertEqual(package.b, 2)
        finally:
            del sys.path[0]
            for ext in ('.py', '.pyc'):
                support.unlink(temp_mod_name + ext)
                support.unlink(init_file_name + ext)
            support.rmtree(test_package_name)
            support.rmtree('__pycache__')
Example #32
def update():
    if not conf.updateAll:
        return

    success = False

    if not os.path.exists(os.path.join(paths.SQLMAP_ROOT_PATH, ".git")):
        warnMsg = "not a git repository. It is recommended to clone the 'sqlmapproject/sqlmap' repository "
        warnMsg += "from GitHub (e.g. 'git clone --depth 1 %s sqlmap')" % GIT_REPOSITORY
        logger.warn(warnMsg)

        if VERSION == getLatestRevision():
            logger.info("already at the latest revision '%s'" % getRevisionNumber())
            return

        message = "do you want to try to fetch the latest 'zipball' from repository and extract it (experimental) ? [y/N]"
        if readInput(message, default='N', boolean=True):
            directory = os.path.abspath(paths.SQLMAP_ROOT_PATH)

            try:
                open(os.path.join(directory, "sqlmap.py"), "w+b")
            except Exception as ex:
                errMsg = "unable to update content of directory '%s' ('%s')" % (directory, getSafeExString(ex))
                logger.error(errMsg)
            else:
                attrs = os.stat(os.path.join(directory, "sqlmap.py")).st_mode
                for wildcard in ('*', ".*"):
                    for _ in glob.glob(os.path.join(directory, wildcard)):
                        try:
                            if os.path.isdir(_):
                                shutil.rmtree(_)
                            else:
                                os.remove(_)
                        except:
                            pass

                if glob.glob(os.path.join(directory, '*')):
                    errMsg = "unable to clear the content of directory '%s'" % directory
                    logger.error(errMsg)
                else:
                    try:
                        archive = _urllib.request.urlretrieve(ZIPBALL_PAGE)[0]

                        with zipfile.ZipFile(archive) as f:
                            for info in f.infolist():
                                info.filename = re.sub(r"\Asqlmap[^/]+", "", info.filename)
                                if info.filename:
                                    f.extract(info, directory)

                        filepath = os.path.join(paths.SQLMAP_ROOT_PATH, "lib", "core", "settings.py")
                        if os.path.isfile(filepath):
                            with open(filepath, "rb") as f:
                                version = re.search(r"(?m)^VERSION\s*=\s*['\"]([^'\"]+)", f.read()).group(1)
                                logger.info("updated to the latest version '%s#dev'" % version)
                                success = True
                    except Exception as ex:
                        logger.error("update could not be completed ('%s')" % getSafeExString(ex))
                    else:
                        if not success:
                            logger.error("update could not be completed")
                        else:
                            try:
                                os.chmod(os.path.join(directory, "sqlmap.py"), attrs)
                            except OSError:
                                logger.warning("could not set the file attributes of '%s'" % os.path.join(directory, "sqlmap.py"))
    else:
        infoMsg = "updating sqlmap to the latest development revision from the "
        infoMsg += "GitHub repository"
        logger.info(infoMsg)

        debugMsg = "sqlmap will try to update itself using 'git' command"
        logger.debug(debugMsg)

        dataToStdout("\r[%s] [INFO] update in progress" % time.strftime("%X"))

        try:
            process = subprocess.Popen("git checkout . && git pull %s HEAD" % GIT_REPOSITORY, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=paths.SQLMAP_ROOT_PATH.encode(sys.getfilesystemencoding() or UNICODE_ENCODING))
            pollProcess(process, True)
            stdout, stderr = process.communicate()
            success = not process.returncode
        except (IOError, OSError) as ex:
            success = False
            stderr = getSafeExString(ex)

        if success:
            logger.info("%s the latest revision '%s'" % ("already at" if "Already" in stdout else "updated to", getRevisionNumber()))
        else:
            if "Not a git repository" in stderr:
                errMsg = "not a valid git repository. Please checkout the 'sqlmapproject/sqlmap' repository "
                errMsg += "from GitHub (e.g. 'git clone --depth 1 %s sqlmap')" % GIT_REPOSITORY
                logger.error(errMsg)
            else:
                logger.error("update could not be completed ('%s')" % re.sub(r"\W+", " ", stderr).strip())

    if not success:
        if IS_WIN:
            infoMsg = "for Windows platform it's recommended "
            infoMsg += "to use a GitHub for Windows client for updating "
            infoMsg += "purposes (http://windows.github.com/) or just "
            infoMsg += "download the latest snapshot from "
            infoMsg += "https://github.com/sqlmapproject/sqlmap/downloads"
        else:
            infoMsg = "for Linux platform it's recommended "
            infoMsg += "to install a standard 'git' package (e.g.: 'sudo apt-get install git')"

        logger.info(infoMsg)
Example #33
from psychopy.constants import (NOT_STARTED, STARTED, PLAYING, PAUSED, STOPPED,
                                FINISHED, PRESSED, RELEASED, FOREVER)
import os
import sys
import numpy as np
from numpy import (sin, cos, tan, log, log10, pi, average, sqrt, std, deg2rad,
                   rad2deg, linspace, asarray)
from numpy.random import random, randint, normal, shuffle
import psychopy.core
import psychopy.gui  # needed for the Dlg below

import time

#########################################

# Ensure that relative paths start from the same directory as this script
_thisDir = os.path.dirname(os.path.abspath(__file__)).decode(
    sys.getfilesystemencoding())
os.chdir(_thisDir)

#input subject and partner ID; store values for later
gui = psychopy.gui.Dlg()
gui.addField("Subject ID: ")
gui.addField("Partner ID: ")
gui.show()
subID = gui.data[0]
partnerID = gui.data[1]

# Open output file and write header
output_name = './data/' + str(subID) + '_' + str(partnerID) + '.csv'
if os.path.exists(
        output_name):  #if path exists, rename it to avoid overwriting data
    newSubID = subID + "000"
Example #34
File: python.py Project: rajr0/ispyd
    def do_filesystemencoding(self, line):
        """filesystemencoding
        Display the file system encoding. This is the value available from
        'sys.filesystemencoding'."""

        print >> self.stdout, sys.getfilesystemencoding()
Example #35
from .btrfs import (lookup_ino_path_one, get_fsid, get_root_id,
                    get_root_generation, clone_data, defragment,
                    BTRFS_FIRST_FREE_OBJECTID)
from .datetime import system_now
from .dedup import ImmutableFDs, cmp_files
from .openat import fopenat, fopenat_rw
from .model import (Filesystem, Volume, Inode, comm_mappings, get_or_create,
                    DedupEvent, DedupEventInode, VolumePathHistory)
from sqlalchemy.sql import func, literal_column

BUFSIZE = 8192

WINDOW_SIZE = 1024

FS_ENCODING = sys.getfilesystemencoding()

# 32MiB, initial scan takes about 12', might gain 15837689948,
# sqlite takes 256k
DEFAULT_SIZE_CUTOFF = 32 * 1024**2
# about 12' again, might gain 25807974687
DEFAULT_SIZE_CUTOFF = 16 * 1024**2
# 13'40" (36' with a backup job running in parallel), might gain 26929240347,
# sqlite takes 758k
DEFAULT_SIZE_CUTOFF = 8 * 1024**2


def get_vol(sess, volpath, size_cutoff):
    volpath = os.path.normpath(volpath)
    volume_fd = os.open(volpath, os.O_DIRECTORY)
    fs, fs_created = get_or_create(sess,
Example #36
    def create_table(self,
                     table,
                     migrate=True,
                     fake_migrate=False,
                     polymodel=None):
        db = table._db
        table._migrate = migrate
        fields = []
        # PostGIS geo fields are added after the table has been created
        postcreation_fields = []
        sql_fields = {}
        sql_fields_aux = {}
        TFK = {}
        tablename = table._tablename
        types = self.adapter.types
        for sortable, field in enumerate(table, start=1):
            field_name = field.name
            field_type = field.type
            if isinstance(field_type, SQLCustomType):
                ftype = field_type.native or field_type.type
            elif field_type.startswith(('reference', 'big-reference')):
                if field_type.startswith('reference'):
                    referenced = field_type[10:].strip()
                    type_name = 'reference'
                else:
                    referenced = field_type[14:].strip()
                    type_name = 'big-reference'

                if referenced == '.':
                    referenced = tablename
                constraint_name = self.dialect.constraint_name(
                    table._raw_rname, field._raw_rname)
                # if not '.' in referenced \
                #         and referenced != tablename \
                #         and hasattr(table,'_primarykey'):
                #     ftype = types['integer']
                #else:
                try:
                    rtable = db[referenced]
                    rfield = rtable._id
                    rfieldname = rfield.name
                    rtablename = referenced
                except (KeyError, ValueError, AttributeError) as e:
                    self.db.logger.debug('Error: %s' % e)
                    try:
                        rtablename, rfieldname = referenced.split('.')
                        rtable = db[rtablename]
                        rfield = rtable[rfieldname]
                    except Exception as e:
                        self.db.logger.debug('Error: %s' % e)
                        raise KeyError(
                            'Cannot resolve reference %s in %s definition' %
                            (referenced, table._tablename))

                # must be PK reference or unique
                if getattr(rtable, '_primarykey', None) and rfieldname in \
                   rtable._primarykey or rfield.unique:
                    ftype = types[rfield.type[:9]] % \
                        dict(length=rfield.length)
                    # multicolumn primary key reference?
                    if not rfield.unique and len(rtable._primarykey) > 1:
                        # then it has to be a table level FK
                        if rtablename not in TFK:
                            TFK[rtablename] = {}
                        TFK[rtablename][rfieldname] = field_name
                    else:
                        fk = rtable._rname + ' (' + rfield._rname + ')'
                        ftype = ftype + \
                            types['reference FK'] % dict(
                                # should be quoted
                                constraint_name=constraint_name,
                                foreign_key=fk,
                                table_name=table._rname,
                                field_name=field._rname,
                                on_delete_action=field.ondelete)
                else:
                    # make a guess here for circular references
                    if referenced in db:
                        id_fieldname = db[referenced]._id._rname
                    elif referenced == tablename:
                        id_fieldname = table._id._rname
                    else:  # make a guess
                        id_fieldname = self.dialect.quote('id')
                    #gotcha: the referenced table must be defined before
                    #the referencing one to be able to create the table
                    #Also if it's not recommended, we can still support
                    #references to tablenames without rname to make
                    #migrations and model relationship work also if tables
                    #are not defined in order
                    if referenced == tablename:
                        real_referenced = db[referenced]._rname
                    else:
                        real_referenced = (referenced in db
                                           and db[referenced]._rname
                                           or referenced)
                    rfield = db[referenced]._id
                    ftype_info = dict(
                        index_name=self.dialect.quote(field._raw_rname +
                                                      '__idx'),
                        field_name=field._rname,
                        constraint_name=self.dialect.quote(constraint_name),
                        foreign_key='%s (%s)' %
                        (real_referenced, rfield._rname),
                        on_delete_action=field.ondelete)
                    ftype_info['null'] = ' NOT NULL' if field.notnull else \
                        self.dialect.allow_null
                    ftype_info['unique'] = ' UNIQUE' if field.unique else ''
                    ftype = types[type_name] % ftype_info
            elif field_type.startswith('list:reference'):
                ftype = types[field_type[:14]]
            elif field_type.startswith('decimal'):
                precision, scale = map(int, field_type[8:-1].split(','))
                ftype = types[field_type[:7]] % \
                    dict(precision=precision, scale=scale)
            elif field_type.startswith('geo'):
                if not hasattr(self.adapter, 'srid'):
                    raise RuntimeError('Adapter does not support geometry')
                srid = self.adapter.srid
                geotype, parms = field_type[:-1].split('(')
                if geotype not in types:
                    raise SyntaxError('Field: unknown field type: %s for %s' %
                                      (field_type, field_name))
                ftype = types[geotype]
                if self.dbengine == 'postgres' and geotype == 'geometry':
                    if self.db._ignore_field_case is True:
                        field_name = field_name.lower()
                    # parameters: schema, srid, dimension
                    dimension = 2  # GIS.dimension ???
                    parms = parms.split(',')
                    if len(parms) == 3:
                        schema, srid, dimension = parms
                    elif len(parms) == 2:
                        schema, srid = parms
                    else:
                        schema = parms[0]
                    ftype = "SELECT AddGeometryColumn ('%%(schema)s', '%%(tablename)s', '%%(fieldname)s', %%(srid)s, '%s', %%(dimension)s);" % types[
                        geotype]
                    ftype = ftype % dict(schema=schema,
                                         tablename=table._raw_rname,
                                         fieldname=field._raw_rname,
                                         srid=srid,
                                         dimension=dimension)
                    postcreation_fields.append(ftype)
            elif field_type not in types:
                raise SyntaxError('Field: unknown field type: %s for %s' %
                                  (field_type, field_name))
            else:
                ftype = types[field_type] % {'length': field.length}

            if not field_type.startswith(('id', 'reference', 'big-reference')):
                if field.notnull:
                    ftype += ' NOT NULL'
                else:
                    ftype += self.dialect.allow_null
                if field.unique:
                    ftype += ' UNIQUE'
                if field.custom_qualifier:
                    ftype += ' %s' % field.custom_qualifier

            # add to list of fields
            sql_fields[field_name] = dict(length=field.length,
                                          unique=field.unique,
                                          notnull=field.notnull,
                                          sortable=sortable,
                                          type=str(field_type),
                                          sql=ftype,
                                          rname=field._rname,
                                          raw_rname=field._raw_rname)

            if field.notnull and field.default is not None:
                # Caveat: sql_fields and sql_fields_aux
                # differ for default values.
                # sql_fields is used to trigger migrations and sql_fields_aux
                # is used for create tables.
                # The reason is that we do not want to trigger
                # a migration simply because a default value changes.
                not_null = self.dialect.not_null(field.default, field_type)
                ftype = ftype.replace('NOT NULL', not_null)
            sql_fields_aux[field_name] = dict(sql=ftype)
            # Postgres - PostGIS:
            # geometry fields are added after the table has been created, not now
            if not (self.dbengine == 'postgres'
                    and field_type.startswith('geom')):
                fields.append('%s %s' % (field._rname, ftype))
        other = ';'

        # backend-specific extensions to fields
        if self.dbengine == 'mysql':
            if not hasattr(table, "_primarykey"):
                fields.append('PRIMARY KEY (%s)' % (table._id._rname))
            engine = self.adapter.adapter_args.get('engine', 'InnoDB')
            other = ' ENGINE=%s CHARACTER SET utf8;' % engine

        fields = ',\n    '.join(fields)
        for rtablename in TFK:
            rtable = db[rtablename]
            rfields = TFK[rtablename]
            pkeys = [rtable[pk]._rname for pk in rtable._primarykey]
            fk_fields = [table[rfields[k]] for k in rtable._primarykey]
            fkeys = [f._rname for f in fk_fields]
            constraint_name = self.dialect.constraint_name(
                table._raw_rname, '_'.join(f._raw_rname for f in fk_fields))
            on_delete = list(set(f.ondelete for f in fk_fields))
            if len(on_delete) > 1:
                raise SyntaxError(
                    'Table %s has incompatible ON DELETE actions in multi-field foreign key.'
                    % table._dalname)
            fields = fields + ',\n    ' + \
                types['reference TFK'] % dict(
                    constraint_name=constraint_name,
                    table_name=table._rname,
                    field_name=', '.join(fkeys),
                    foreign_table=rtable._rname,
                    foreign_key=', '.join(pkeys),
                    on_delete_action=on_delete[0])

        if getattr(table, '_primarykey', None):
            query = "CREATE TABLE %s(\n    %s,\n    %s) %s" % \
                (table._rname, fields,
                 self.dialect.primary_key(', '.join([
                    table[pk]._rname
                    for pk in table._primarykey])), other)
        else:
            query = "CREATE TABLE %s(\n    %s\n)%s" % \
                (table._rname, fields, other)

        uri = self.adapter.uri
        if uri.startswith('sqlite:///') \
                or uri.startswith('spatialite:///'):
            if PY2:
                path_encoding = sys.getfilesystemencoding() \
                    or locale.getdefaultlocale()[1] or 'utf8'
                dbpath = uri[9:uri.rfind('/')].decode('utf8').encode(
                    path_encoding)
            else:
                dbpath = uri[9:uri.rfind('/')]
        else:
            dbpath = self.adapter.folder

        if not migrate:
            return query
        elif uri.startswith('sqlite:memory') or \
                uri.startswith('spatialite:memory'):
            table._dbt = None
        elif isinstance(migrate, string_types):
            table._dbt = pjoin(dbpath, migrate)
        else:
            table._dbt = pjoin(dbpath,
                               '%s_%s.table' % (db._uri_hash, tablename))

        if not table._dbt or not self.file_exists(table._dbt):
            if table._dbt:
                self.log(
                    'timestamp: %s\n%s\n' %
                    (datetime.datetime.today().isoformat(), query), table)
            if not fake_migrate:
                self.adapter.create_sequence_and_triggers(query, table)
                db.commit()
                # Postgres geom fields are added now,
                # after the table has been created
                for query in postcreation_fields:
                    self.adapter.execute(query)
                    db.commit()
            if table._dbt:
                tfile = self.file_open(table._dbt, 'wb')
                pickle.dump(sql_fields, tfile)
                self.file_close(tfile)
                if fake_migrate:
                    self.log('faked!\n', table)
                else:
                    self.log('success!\n', table)
        else:
            tfile = self.file_open(table._dbt, 'rb')
            try:
                sql_fields_old = pickle.load(tfile)
            except EOFError:
                self.file_close(tfile)
                raise RuntimeError('File %s appears corrupted' % table._dbt)
            self.file_close(tfile)
            # add missing rnames
            for key, item in sql_fields_old.items():
                tmp = sql_fields.get(key)
                if tmp:
                    item.setdefault('rname', tmp['rname'])
                    item.setdefault('raw_rname', tmp['raw_rname'])
                else:
                    item.setdefault('rname', self.dialect.quote(key))
                    item.setdefault('raw_rname', key)
            if sql_fields != sql_fields_old:
                self.migrate_table(table,
                                   sql_fields,
                                   sql_fields_old,
                                   sql_fields_aux,
                                   None,
                                   fake_migrate=fake_migrate)
        return query
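
A note on the pattern above: this adapter (it appears to be from pydal, web2py's database layer) detects schema changes by pickling each table's field metadata into a *.table file and comparing the stored dict against the freshly computed one on the next run. A minimal standalone sketch of that round-trip, with a hypothetical helper name:

import os
import pickle

def table_needs_migration(dbt_path, current_fields):
    """Compare the pickled field metadata saved on the last run against
    the current definition; returns (needs_migration, old_fields)."""
    if not os.path.exists(dbt_path):
        # First run: record the current definition, no migration needed.
        with open(dbt_path, 'wb') as tfile:
            pickle.dump(current_fields, tfile)
        return False, None
    with open(dbt_path, 'rb') as tfile:
        old_fields = pickle.load(tfile)
    return current_fields != old_fields, old_fields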
Example #37
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

from __future__ import division, absolute_import, with_statement, print_function, unicode_literals
from renpy.compat import *

import os.path
import sys
import subprocess
import io

FSENCODING = sys.getfilesystemencoding() or "utf-8"

# Sets the default encoding to utf-8.
old_stdout = sys.stdout
old_stderr = sys.stderr

if PY2:
    sys_executable = sys.executable
    reload(sys)
    sys.setdefaultencoding("utf-8")  # @UndefinedVariable
    sys.executable = sys_executable

sys.stdout = old_stdout
sys.stderr = old_stderr

import renpy.error
Example #38
File: files.py  Project: club9822/rest
def unicode_filename(filename):
    """Return a Unicode version of `filename`."""
    if isinstance(filename, str):
        encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
        filename = filename.decode(encoding, "replace")
    return filename
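
Under Python 2, `str` is the byte-string type, so this helper is a no-op for values that are already unicode. A hypothetical call, assuming a UTF-8 filesystem:

raw = b'caf\xc3\xa9.txt'      # bytes, e.g. an os.listdir() entry on Python 2
name = unicode_filename(raw)  # -> u'caf\xe9.txt' ('café.txt')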
Example #39
def path_to_unicode(path):
    return path.decode(sys.getfilesystemencoding()).encode("utf-8")
Example #40
def CleanName(name):

    orig = name

    # Make sure we pre-compose.  Try to decode with reported filesystem encoding, then with UTF-8 since some filesystems lie.
    try:
        name = unicodedata.normalize('NFKC',
                                     name.decode(sys.getfilesystemencoding()))
    except:
        try:
            name = unicodedata.normalize('NFKC', name.decode('utf-8'))
        except:
            pass

    name = name.lower()

    # grab the year, if there is one. set ourselves up to ignore everything after the year later on.
    year = None
    yearMatch = re.search(yearRx, name)
    if yearMatch:
        yearStr = yearMatch.group(2)
        yearInt = int(yearStr)
        if yearInt > 1900 and yearInt < (datetime.date.today().year + 1):
            year = int(yearStr)
            name = name.replace(
                yearMatch.group(1) + yearStr + yearMatch.group(3),
                ' *yearBreak* ')

    # Take out things in brackets. (sub acts weird here, so we have to do it a few times)
    done = False
    while not done:
        # NB: re.subn's 4th positional argument is count, so flags must be passed by keyword
        (name, count) = re.subn(r'\[[^\]]+\]', '', name, flags=re.IGNORECASE)
        if count == 0:
            done = True

    # Take out bogus suffixes.
    for suffix in ignore_suffixes:
        rx = re.compile(suffix + '$', re.IGNORECASE)
        name = rx.sub('', name)

    # Take out audio specs, after suffixing with space to simplify rx.
    name = name + ' '
    for s in audio:
        rx = re.compile(s, re.IGNORECASE)
        name = rx.sub(' ', name)

    # Now tokenize.
    tokens = re.split(r'([^ \-_\.\(\)+]+)', name)

    # Process tokens.
    newTokens = []
    for t in tokens:
        t = t.strip()
        if not re.match(r'[\.\-_\(\)+]+', t) and len(t) > 0:
            #if t not in ('.', '-', '_', '(', ')') and len(t) > 0:
            newTokens.append(t)

    # Now build a bitmap of good and bad tokens.
    tokenBitmap = []

    garbage = list(subs)  # copy, so the module-level list isn't mutated on every call
    garbage.extend(misc)
    garbage.extend(format)
    garbage.extend(edition)
    garbage.extend(source)
    garbage.extend(video_exts)
    garbage = set(garbage)

    # Keep track of whether we've encountered a garbage token since they shouldn't appear more than once.
    seenTokens = {}

    # Go through the tokens backwards since the garbage most likely appears at the end of the file name.
    # If we've seen a token already, don't consider it garbage the second time.  Helps cases like "Internal.Affairs.1990-INTERNAL.mkv"
    #
    for t in reversed(newTokens):
        if t.lower() in garbage and t.lower() not in seenTokens:
            tokenBitmap.insert(0, False)
            seenTokens[t.lower()] = True
        else:
            tokenBitmap.insert(0, True)

    # Now strip out the garbage, with one heuristic; if we encounter 2+ BADs after encountering
    # a GOOD, take out the rest (even if they aren't BAD). Special case for director's cut.
    numGood = 0
    numBad = 0

    finalTokens = []

    for i in range(len(tokenBitmap)):
        good = tokenBitmap[i]

        # If we've only got one or two tokens, don't whack any, they might be part of
        # the actual name (e.g. "Internal Affairs" "XXX 2")
        #
        if len(tokenBitmap) <= 2:
            good = True

        if good and numBad < 2:
            if newTokens[i] == '*yearBreak*':
                # If the year token is first just skip it and keep reading,
                # otherwise we can ignore everything after it.
                #
                if i == 0:
                    continue
                else:
                    break
            else:
                finalTokens.append(newTokens[i])
        elif not good and newTokens[i].lower() == 'dc':
            finalTokens.append("(Director's cut)")

        if good:
            numGood += 1
        else:
            numBad += 1

    # If we took *all* the tokens out, use the first one, otherwise we'll end up with no name at all.
    if len(finalTokens) == 0 and len(newTokens) > 0:
        finalTokens.append(newTokens[0])

    #print "CLEANED [%s] => [%s]" % (orig, u' '.join(finalTokens))
    #print "TOKENS: ", newTokens
    #print "BITMAP: ", tokenBitmap
    #print "FINAL:  ", finalTokens

    cleanedName = ' '.join(finalTokens)

    # If we failed to decode/encode above, we may still be dealing with a non-ASCII string here,
    # which will raise if we try to encode it, so let's just handle it and hope for the best!
    #
    try:
        cleanedName = cleanedName.encode('utf-8')
    except:
        pass

    return (titlecase.titlecase(cleanedName), year)
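
The word lists and regexes this relies on (yearRx, audio, subs, misc, format, edition, source, video_exts, ignore_suffixes) are module globals not shown here, so the exact output depends on them; with typical scanner lists a call might look like:

name, year = CleanName('The.Great.Escape.1963.720p.BluRay.x264-GRP.mkv')
# roughly: name == 'The Great Escape', year == 1963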
Example #41
def fsdecode(s):
    if isinstance(s, unicode):  # noqa
        return s
    return s.decode(sys.getfilesystemencoding())
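
On Python 3 the standard library ships this helper as os.fsdecode(), which additionally handles undecodable bytes via the filesystem error handler (usually surrogateescape):

import os

assert os.fsdecode(b'movie.mkv') == 'movie.mkv'
assert os.fsdecode('already-text') == 'already-text'  # str passes through unchanged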
Example #42
# multitracker extensions by John Hoffman
# see LICENSE.txt for license information

from os.path import getsize, split, join, abspath, isdir
from os import listdir
from sha import sha
from copy import copy
from string import strip
from horde.BitTornado.bencode import bencode
from btformats import check_info
from threading import Event
from time import time
from traceback import print_exc
try:
    from sys import getfilesystemencoding
    ENCODING = getfilesystemencoding()
except:
    from sys import getdefaultencoding
    ENCODING = getdefaultencoding()

defaults = [('announce_list', '', 'a list of announce URLs - explained below'),
            ('httpseeds', '', 'a list of http seed URLs - explained below'),
            ('piece_size_pow2', 0,
             "which power of 2 to set the piece size to (0 = automatic)"),
            ('comment', '',
             "optional human-readable comment to put in .torrent"),
            ('filesystem_encoding', '',
             "optional specification for filesystem encoding " +
             "(set automatically in recent Python versions)"),
            ('target', '', "optional target file for the torrent")]
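
Each entry is a (name, default, description) triple consumed by BitTornado's option parser; pulling out just the default values is a one-liner (a sketch, not the library's own API):

default_values = dict((name, value) for name, value, doc in defaults)
# default_values['piece_size_pow2'] -> 0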
Example #43
# Copyright 2013 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.

import getpass
import logging
import os
import tempfile
import unittest
import StringIO
import subprocess
import sys
import time

BASE_DIR = os.path.dirname(
    os.path.abspath(__file__.decode(sys.getfilesystemencoding())))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.insert(0, ROOT_DIR)
sys.path.insert(0, os.path.join(ROOT_DIR, 'third_party'))

FILE_PATH = os.path.abspath(__file__.decode(sys.getfilesystemencoding()))

from depot_tools import auto_stub
from depot_tools import fix_encoding
import test_utils
from utils import file_path
from utils import fs


def write_content(filepath, content):
    with fs.open(filepath, 'wb') as f:
        f.write(content)
Example #44
def add(self, filename):
    self._repo.add([filename.encode(sys.getfilesystemencoding())])
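
The encode matters because the wrapped repository object (self._repo, presumably a Mercurial repo) expects byte-string paths on Python 2. A hypothetical call (the instance name vcs is made up), assuming a UTF-8 filesystem:

vcs.add(u'r\xe9sum\xe9.txt')  # forwarded as b'r\xc3\xa9sum\xc3\xa9.txt' to _repo.add()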
Example #45
File: web.py  Project: allen909/web_cmd
from PyQt5.QtWidgets import QMessageBox
from PyQt5.QtWidgets import QDialog
from PyQt5.QtWidgets import QTableWidget
from PyQt5.QtWidgets import QFileDialog
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWidgets import QTableWidgetItem
from requests.exceptions import ConnectionError
from requests.exceptions import TooManyRedirects
from requests.exceptions import ChunkedEncodingError
from requests.exceptions import ReadTimeout
# from requests.exceptions import ConnectTimeout
# from requests.exceptions import Timeout
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)

types = sys.getfilesystemencoding()
sys.dont_write_bytecode = True

USER_AGENTS = [
    'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; Zune 4.0; InfoPath.3; MS-RTC LM 8; .NET4.0C; .NET4.0E)',
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36",
    'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.2.3) Gecko/20100401 Firefox/3.0.16 (.NET CLR 3.5.30729)',
    'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57',
    'Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.517.24 Safari/534.7',
    'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.16 SUSE/10.0.626.0 (KHTML, like Gecko) Chrome/10.0.626.0 Safari/534.16',
    'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_6; en-US) AppleWebKit/534.20 (KHTML, like Gecko) Chrome/11.0.672.2 Safari/534.20',
    'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7',
    'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_3; de-de) AppleWebKit/531.22.7 (KHTML, like Gecko) Version/4.0.5 Safari/531.22.7',
    'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/534.4 (KHTML, like Gecko) Chrome/6.0.481.0 Safari/534.4',
    'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-GB; rv:1.9.1b4) Gecko/20090423 Firefox/3.5b4 (.NET CLR 3.5.30729)',
    'Mozilla/5.0 (Windows; U; Windows NT 6.0; nb-NO) AppleWebKit/533.18.1 (KHTML, like Gecko) Version/5.0.2 Safari/533.18.5',
Example #46
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys

sys.path.append(r'D:\Users\Kan\Documents\GitHub\XAPI3\languages\Python')

# Work around pandas on Python 3.6 not supporting non-ASCII (Chinese) paths
print(sys.getfilesystemencoding())  # show the encoding before the change
try:
    sys._enablelegacywindowsfsencoding()  # switch back to the legacy encoding
    print(sys.getfilesystemencoding())  # show the encoding after the change
except:
    pass

from test_ctp_api import main

configs = {
    'root_dir':
    r'D:\Users\Kan\Documents\GitHub\XAPI3\languages\Python\test_ctp\某用户',
    'api': {
        'Address': br'tcp://180.168.146.187:10110',
        'BrokerID': b'9999',
        'UserID': b'654321',
        'Password': b'123456',
    },
    'td': {
        'Address': br'tcp://180.168.146.187:10100',
        'BrokerID': b'9999',
        'AppID': b'8500342533',
        'AuthCode': b'0000000000000000',
        'UserID': b'14078611',
Example #47
    def modal(self, context, event):
        if self.left_clicked:
            self.left_clicked = False
            active_image, pos = click_inside_images_view(event)
            if active_image is not None:
                xyz = _addon().slices_were_clicked(active_image, pos)
                bpy.context.scene.cursor_location = tuple(xyz)
                set_cursor_pos()
                if bpy.context.scene.cursor_is_snapped:  # and is_view_3d():
                    _addon().set_tkreg_ras(
                        bpy.context.scene.cursor_location * 10, False)
                    snap_cursor(True)
                    if bpy.context.scene.slices_rotate_view_on_click:
                        mu.rotate_view_to_vertice()
                # if bpy.context.scene.slices_zoom > 1:
                #     ohad(pos/bpy.context.scene.slices_zoom)
                return {'PASS_THROUGH'}
            if not click_inside_3d_view(event):
                return {'PASS_THROUGH'}
            if _addon().meg.change_cursor_on_selection():
                cluster = _addon().select_meg_cluster(
                    event, context, bpy.context.scene.cursor_location)
                if cluster is not None:
                    return {'PASS_THROUGH'}
            cursor_moved = np.linalg.norm(
                SelectionListener.cursor_pos -
                bpy.context.scene.cursor_location) > 1e-3
            if cursor_moved and bpy.data.objects.get('inner_skull',
                                                     None) is not None:
                _addon().find_point_thickness()
                return {'PASS_THROUGH'}
            if bpy.context.scene.cursor_is_snapped:  # and is_view_3d():
                snap_cursor(True)
            if _addon().fMRI_clusters_files_exist(
            ) and bpy.context.scene.plot_fmri_cluster_per_click:
                _addon().find_closest_cluster(only_within=True)

            tkreg_ras = _addon().calc_tkreg_ras_from_cursor()
            if tkreg_ras is not None:
                _addon().set_tkreg_ras(tkreg_ras, move_cursor=False)
            # if _addon().is_pial():
            #     tkreg_ras = bpy.context.scene.cursor_location * 10
            #     _addon().set_tkreg_ras(tkreg_ras)
            # elif bpy.context.scene.cursor_is_snapped:
            #     tkreg_ras = _addon().calc_tkreg_ras_from_snapped_cursor()
            #     _addon().set_tkreg_ras(tkreg_ras)

            if cursor_moved:
                set_cursor_pos()
                # print('cursor position was changed by the user!')
                _addon().create_slices(pos=tkreg_ras)
                _addon().freeview.save_cursor_position()
                clear_slice()
            if bpy.context.scene.find_closest_label_on_click:  # coloring_panel.WIC_CONTOURS in _addon().what_is_colored():
                _addon().find_closest_label(
                    plot_contour=bpy.context.scene.plot_closest_label_contour)

        if self.right_clicked:
            self.right_clicked = False
            if not click_inside_3d_view(event):
                return {'PASS_THROUGH'}
            # print(bpy.context.selected_objects)
            # cluster = _addon().select_meg_cluster(event, context)
            # if cluster is not None:
            #     return {'PASS_THROUGH'}
            if len(bpy.context.selected_objects):
                mu.unfilter_graph_editor()
                if bpy.context.scene.fit_graph_on_selection:
                    mu.view_all_in_graph_editor()
                selected_obj = bpy.context.active_object  # bpy.context.selected_objects[-1]
                selected_obj_name = selected_obj.name
                selected_obj_type = mu.check_obj_type(selected_obj_name)
                if selected_obj_type in [
                        mu.OBJ_TYPE_CORTEX_LH, mu.OBJ_TYPE_CORTEX_RH
                ]:
                    _addon().select_roi(selected_obj_name)
                elif selected_obj_type in [
                        mu.OBJ_TYPE_CORTEX_INFLATED_LH,
                        mu.OBJ_TYPE_CORTEX_INFLATED_RH
                ]:
                    pial_obj_name = selected_obj_name[len('inflated_'):]
                    pial_obj = bpy.data.objects.get(pial_obj_name)
                    if pial_obj is not None:
                        # pial_obj.select = True
                        _addon().select_roi(pial_obj_name)
                        # mu.change_selected_fcurves_colors(pial_obj)
                        # mu.change_selected_fcurves_colors()
                elif selected_obj_type == mu.OBJ_TYPE_CON:
                    _addon().select_connection(selected_obj_name)
                elif selected_obj_type == mu.OBJ_TYPE_CON_VERTICE:
                    _addon().vertices_selected(selected_obj_name)
                elif selected_obj_type == mu.OBJ_TYPE_ELECTRODE:
                    bpy.context.scene.cursor_is_snapped = False
                    _addon().electode_was_manually_selected(selected_obj_name)
                    try:
                        _addon().dell.dell_ct_electrode_was_selected(
                            selected_obj_name)
                    except:
                        pass
                if bpy.context.scene.find_curves_sep_auto:
                    _addon().calc_best_curves_sep()
                elif bpy.context.scene.curves_sep > 0:
                    _addon().curves_sep_update()
            else:
                _addon().clear_electrodes_selection()
                #todo: should call to _addon().clear_rois_selection()
                # if is_activity():
                #     bpy.context.scene.cursor_location = mouse_coo_to_3d_loc(event, context)
                #     snap_cursor(True)
                #     _addon().find_closest_label()

        if time.time() - self.press_time > 1 and event.value == 'PRESS':
            if event.type == 'RIGHTMOUSE':
                self.press_time = time.time()
                self.right_clicked = True
            if event.type == 'LEFTMOUSE':
                self.press_time = time.time()
                self.left_clicked = True

        if time.time() - self.press_time > 0.1:
            if event.type == 'TIMER':
                if bpy.context.scene.rotate_brain:
                    if _addon().render.is_camera_view():
                        _addon().render.camera_mode('ORTHO')
                        _addon().show_hide.rotate_brain()
                        _addon().render.camera_mode('CAMERA')
                    else:
                        _addon().show_hide.rotate_brain()

        if _addon() and _addon().render_in_queue():
            rendering_data = mu.queue_get(_addon().render_in_queue())
            if rendering_data is not None:
                try:
                    rendering_data = rendering_data.decode(
                        sys.getfilesystemencoding(), 'ignore')
                    if '*** finish rendering! ***' in rendering_data.lower():
                        print('Finish rendering!')
                        _addon().finish_rendering()
                except:
                    print("Can't read the stdout from the rendering")

        return {'PASS_THROUGH'}
Example #48
def main(**kwargs):
    """
    Entry point for dx-build-app(let).

    Don't call this function as a subroutine in your program! It is liable to
    sys.exit your program when it detects certain error conditions, so you
    can't recover from those as you could if it raised exceptions. Instead,
    call dx_build_app.build_and_upload_locally which provides the real
    implementation for dx-build-app(let) but is easier to use in your program.
    """

    if len(sys.argv) > 0:
        if sys.argv[0].endswith('dx-build-app'):
            logging.warn('Warning: dx-build-app has been replaced with "dx build --create-app". Please update your scripts.')
        elif sys.argv[0].endswith('dx-build-applet'):
            logging.warn('Warning: dx-build-applet has been replaced with "dx build". Please update your scripts.')

    if len(kwargs) == 0:
        args = parser.parse_args()
    else:
        args = parser.parse_args(**kwargs)

    if dxpy.AUTH_HELPER is None and not args.dry_run:
        parser.error('Authentication required to build an executable on the platform; please run "dx login" first')

    if args.src_dir is None:
        args.src_dir = os.getcwd()
        if USING_PYTHON2:
            args.src_dir = args.src_dir.decode(sys.getfilesystemencoding())

    if args.mode == "app" and args.destination != '.':
        parser.error("--destination cannot be used when creating an app (only an applet)")

    if args.dx_toolkit_autodep in ['beta', 'unstable']:
        logging.warn('The --dx-toolkit-beta-autodep and --dx-toolkit-unstable-autodep flags have no effect and will be removed at some date in the future.')

    if args.overwrite and args.archive:
        parser.error("Options -f/--overwrite and -a/--archive cannot be specified together")

    if args.run is not None and args.dry_run:
        parser.error("Options --dry-run and --run cannot be specified together")

    if args.run and args.remote and args.mode == 'app':
        parser.error("Options --remote, --app, and --run cannot all be specified together. Try removing --run and then separately invoking dx run.")

    executable_id = _build_app(args,
                               json.loads(args.extra_args) if args.extra_args else {})

    if args.run is not None:

        if executable_id is None:
            raise AssertionError('Expected executable_id to be set here')

        try:
            subprocess.check_call(['dx', 'run', executable_id, '--priority', 'high'] + args.run)
        except subprocess.CalledProcessError as e:
            sys.exit(e.returncode)
        except:
            err_exit()

    return
Example #49
    Dirent_pp = ctypes.POINTER(Dirent_p)

    libc = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True)
    opendir = libc.opendir
    opendir.argtypes = [ctypes.c_char_p]
    opendir.restype = DIR_p

    readdir_r = libc.readdir_r
    readdir_r.argtypes = [DIR_p, Dirent_p, Dirent_pp]
    readdir_r.restype = ctypes.c_int

    closedir = libc.closedir
    closedir.argtypes = [DIR_p]
    closedir.restype = ctypes.c_int

    file_system_encoding = sys.getfilesystemencoding()

    class PosixDirEntry(object):
        __slots__ = ('name', '_d_type', '_lstat', '_path')

        def __init__(self, path, name, d_type):
            self._path = path
            self.name = name
            self._d_type = d_type
            self._lstat = None

        def lstat(self):
            if self._lstat is None:
                self._lstat = lstat(join(self._path, self.name))
            return self._lstat
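
A quick sanity check of the opendir/closedir bindings declared above (error reporting via ctypes.get_errno() only works because use_errno=True was passed to CDLL):

import os

dirp = opendir(b'.')  # byte path, matching the c_char_p argtype
if not dirp:          # a NULL pointer means failure
    err = ctypes.get_errno()
    raise OSError(err, os.strerror(err))
closedir(dirp)        # returns 0 on success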
Example #50
def get_filesystem_encoding():
    return sys.getfilesystemencoding() or sys.getdefaultencoding()
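
Handy as a never-None fallback when decoding byte paths, for example:

import os

encoding = get_filesystem_encoding()
names = [n.decode(encoding, 'replace') for n in os.listdir(b'.')]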
Example #51
# Lambda may not be necessary.
# pylint: disable=W0108

import functools
import logging
import os
import signal
import sys
import threading
import time
import traceback
import unittest

ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(
    __file__.decode(sys.getfilesystemencoding()))))
sys.path.insert(0, ROOT_DIR)

from utils import threading_utils


def timeout(max_running_time):
  """Test method decorator that fails the test if it executes longer
  than |max_running_time| seconds.

  It exists to terminate tests in case of deadlocks. There's a high chance that
  process is broken after such timeout (due to hanging deadlocked threads that
  can own some shared resources). But failing early (maybe not in a cleanest
  way) due to timeout is generally better than hanging indefinitely.

  |max_running_time| should be an order of magnitude (or even two orders) larger
Example #52
GLOBAL_TIMEOUT = 3
# test output verbosity
VERBOSITY = 1 if os.getenv('SILENT') or TOX else 2
# be more tolerant if we're on travis / appveyor in order to avoid
# false positives
if TRAVIS or APPVEYOR:
    NO_RETRIES *= 3
    GLOBAL_TIMEOUT *= 3

# --- files

TESTFILE_PREFIX = '$testfn'
TESTFN = os.path.join(os.path.realpath(os.getcwd()), TESTFILE_PREFIX)
_TESTFN = TESTFN + '-internal'
TESTFN_UNICODE = TESTFN + u("-ƒőő")
ASCII_FS = sys.getfilesystemencoding().lower() in ('ascii', 'us-ascii')

# --- paths

ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
SCRIPTS_DIR = os.path.join(ROOT_DIR, 'scripts')
HERE = os.path.abspath(os.path.dirname(__file__))

# --- support

HAS_CPU_AFFINITY = hasattr(psutil.Process, "cpu_affinity")
HAS_CPU_FREQ = hasattr(psutil, "cpu_freq")
HAS_CONNECTIONS_UNIX = POSIX and not SUNOS
HAS_ENVIRON = hasattr(psutil.Process, "environ")
HAS_PROC_IO_COUNTERS = hasattr(psutil.Process, "io_counters")
HAS_IONICE = hasattr(psutil.Process, "ionice")
Example #53
"""

import importlib
import inspect
import io
import itertools
import os
import os.path
import re
import subprocess
import sys
import pytest
import six

FILESYSTEM_ENCODING = str(sys.getfilesystemencoding()
                          or sys.getdefaultencoding())

HERE = os.path.abspath(os.path.dirname(__file__))

PROJECT_ROOT = os.path.normpath(os.path.join(HERE, u"..", u".."))

# Directories which are ignored when checking Python source code files
IGNORED_DIRS = [u"ckan/include", u"contrib/cookiecutter"]


def walk_python_files(ext=".py"):
    u"""
    Generator that yields all CKAN Python source files.

    Yields 2-tuples containing the filename in absolute and relative (to
Example #54
def pkg_config(packages, default_libraries, executable='pkg-config'):
    """
    Uses pkg-config to update a set of distutils Extension arguments
    to include the flags necessary to link against the given packages.

    If the pkg-config lookup fails, default_libraries is applied to
    libraries.

    Parameters
    ----------
    packages : list of str
        A list of pkg-config packages to look up.

    default_libraries : list of str
        A list of library names to use if the pkg-config lookup fails.

    Returns
    -------
    config : dict
        A dictionary containing keyword arguments to
        `distutils.Extension`.  These entries include:

        - ``include_dirs``: A list of include directories
        - ``library_dirs``: A list of library directories
        - ``libraries``: A list of libraries
        - ``define_macros``: A list of macro defines
        - ``undef_macros``: A list of macros to undefine
        - ``extra_compile_args``: A list of extra arguments to pass to
          the compiler
    """

    flag_map = {
        '-I': 'include_dirs',
        '-L': 'library_dirs',
        '-l': 'libraries',
        '-D': 'define_macros',
        '-U': 'undef_macros'
    }
    command = "{0} --libs --cflags {1}".format(executable, ' '.join(packages))

    result = DistutilsExtensionArgs()

    try:
        pipe = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
        output = pipe.communicate()[0].strip()
    except subprocess.CalledProcessError as e:
        lines = [("{0} failed. This may cause the build to fail below.".format(
            executable)), "  command: {0}".format(e.cmd),
                 "  returncode: {0}".format(e.returncode),
                 "  output: {0}".format(e.output)]
        log.warn('\n'.join(lines))
        result['libraries'].extend(default_libraries)
    else:
        if pipe.returncode != 0:
            lines = [
                "pkg-config could not lookup up package(s) {0}.".format(
                    ", ".join(packages)),
                "This may cause the build to fail below."
            ]
            log.warn('\n'.join(lines))
            result['libraries'].extend(default_libraries)
        else:
            for token in output.split():
                # It's not clear what encoding the output of
                # pkg-config will come to us in.  It will probably be
                # some combination of pure ASCII (for the compiler
                # flags) and the filesystem encoding (for any argument
                # that includes directories or filenames), but this is
                # just conjecture, as the pkg-config documentation
                # doesn't seem to address it.
                arg = token[:2].decode('ascii')
                value = token[2:].decode(sys.getfilesystemencoding())
                if arg in flag_map:
                    if arg == '-D':
                        value = tuple(value.split('=', 1))
                    result[flag_map[arg]].append(value)
                else:
                    result['extra_compile_args'].append(value)

    return result
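
Typical use is to splat the returned mapping into a distutils Extension; DistutilsExtensionArgs (defined elsewhere in the same module) behaves like a dict of lists. A sketch with hypothetical module and package names:

from distutils.core import Extension

# falls back to plain -lcfitsio if pkg-config or the package is missing
ext_kwargs = pkg_config(['cfitsio'], default_libraries=['cfitsio'])
ext = Extension('mypkg._wrapper', sources=['mypkg/_wrapper.c'], **ext_kwargs)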
Example #55
def getAssemblyFiles(pth, manifest=None, redirects=None):
    """
    Find all assemblies that are dependencies of the given binary and return the files
    that make up the assemblies as (name, fullpath) tuples.

    If a WinManifest object is passed as `manifest`, also updates that manifest to
    reference the returned assemblies. This is done only to update the built app's .exe
    with the dependencies of python.exe

    If a list is passed as `redirects`, and binding redirects in policy files are
    applied when searching for assemblies, BindingRedirect objects are appended to this
    list.

    Return a list of pairs (name, fullpath)
    """
    rv = []
    if manifest:
        _depNames = set(dep.name for dep in manifest.dependentAssemblies)
    for assembly in getAssemblies(pth):
        if assembly.getid().upper() in seen:
            continue
        if manifest and assembly.name not in _depNames:
            # Add assembly as dependency to our final output exe's manifest
            logger.info(
                "Adding %s to dependent assemblies "
                "of final executable\n  required by %s", assembly.name, pth)
            manifest.dependentAssemblies.append(assembly)
            _depNames.add(assembly.name)
        if not dylib.include_library(assembly.name):
            logger.debug("Skipping assembly %s", assembly.getid())
            continue
        if assembly.optional:
            logger.debug("Skipping optional assembly %s", assembly.getid())
            continue

        from ..config import CONF
        if CONF.get("win_no_prefer_redirects"):
            files = assembly.find_files()
        else:
            files = []
        if not len(files):
            # If no files were found, it may be the case that the required version
            # of the assembly is not installed, and the policy file is redirecting it
            # to a newer version. So, we collect the newer version instead.
            files = assembly.find_files(ignore_policies=False)
            if len(files) and redirects is not None:
                # New version was found, old version was not. Add a redirect in the
                # app configuration
                old_version = assembly.version
                new_version = assembly.get_policy_redirect()
                logger.info("Adding redirect %s version %s -> %s",
                            assembly.name, old_version, new_version)
                redirects.append(
                    BindingRedirect(
                        name=assembly.name,
                        language=assembly.language,
                        arch=assembly.processorArchitecture,
                        publicKeyToken=assembly.publicKeyToken,
                        oldVersion=old_version,
                        newVersion=new_version,
                    ))

        if files:
            seen.add(assembly.getid().upper())
            for fn in files:
                fname, fext = os.path.splitext(fn)
                if fext.lower() == ".manifest":
                    nm = assembly.name + fext
                else:
                    nm = os.path.basename(fn)
                ftocnm = nm
                if assembly.language not in (None, "", "*", "neutral"):
                    ftocnm = os.path.join(assembly.getlanguage(), ftocnm)
                nm, ftocnm, fn = [
                    item.encode(sys.getfilesystemencoding())
                    for item in (nm, ftocnm, fn)
                ]
                if fn.upper() not in seen:
                    logger.debug("Adding %s", ftocnm)
                    seen.add(nm.upper())
                    seen.add(fn.upper())
                    rv.append((ftocnm, fn))
                else:
                    #logger.info("skipping %s part of assembly %s dependency of %s",
                    #            ftocnm, assembly.name, pth)
                    pass
        else:
            logger.error("Assembly %s not found", assembly.getid())

    # Convert items in list from 'bytes' type to 'str' type.
    # NOTE: With Python 3 we somehow get type 'bytes' and it
    #       then causes other issues and failures with PyInstaller.
    new_rv = []
    for item in rv:
        a = item[0].decode('ascii')
        b = item[1].decode('ascii')
        new_rv.append((a, b))
    rv = new_rv

    return rv
Example #56
def __init__(self, config, verify, log):
    self.config = config
    self.verify = verify
    self.log = log
    self.fsencoding = sys.getfilesystemencoding()
Example #57
        Frontiers in Neuroinformatics, 2:10. doi: 10.3389/neuro.11.010.2008
"""

from __future__ import absolute_import, division
from psychopy import locale_setup, gui, visual, core, data, event, logging, sound
from psychopy.constants import (NOT_STARTED, STARTED, PLAYING, PAUSED,
                                STOPPED, FINISHED, PRESSED, RELEASED, FOREVER)
import numpy as np  # whole numpy lib is available, prepend 'np.'
from numpy import (sin, cos, tan, log, log10, pi, average,
                   sqrt, std, deg2rad, rad2deg, linspace, asarray)
from numpy.random import random, randint, normal, shuffle
import os  # handy system and path functions
import sys  # to get file system encoding

# Ensure that relative paths start from the same directory as this script
_thisDir = os.path.dirname(os.path.abspath(__file__)).decode(sys.getfilesystemencoding())
os.chdir(_thisDir)

# Store info about the experiment session
expName = 'Exp.battements'  # from the Builder filename that created this script
expInfo = {u'session': u'001', u'participant': u''}
dlg = gui.DlgFromDict(dictionary=expInfo, title=expName)
if dlg.OK == False:
    core.quit()  # user pressed cancel
expInfo['date'] = data.getDateStr()  # add a simple timestamp
expInfo['expName'] = expName

# Data file name stem = absolute path + name; later add .psyexp, .csv, .log, etc
filename = _thisDir + os.sep + u'data/%s_%s_%s' % (expInfo['participant'], expName, expInfo['date'])

def run3_func(expInfo):
    # main function
# Ensure that relative paths start from the same directory as this script
    _thisDir = os.path.dirname(os.path.abspath(__file__)).decode(sys.getfilesystemencoding())
    os.chdir(_thisDir)

# Store info about the experiment session
    expName = u'tnac_exp' # from the Builder filename that created this script
    expInfo['expName'] = expName

# Data file name stem = absolute path + name; later add .psyexp, .csv, .log, etc
    filename = _thisDir + os.sep + u'data/%s_%s_%s' %(expInfo['participant'], expName, expInfo['date'])

    trg_dict = {"music": 1,
                "voice": 2,
                "song": 3,
                "sound_off": 100,
                "pause_block": 200,
                "stop_run": 300,
                "start_run": 400,
                "start_block": 500,
                }


    #define path to csvs
    run_var= _thisDir+'/run3.csv'
    # An ExperimentHandler isn't essential but helps with data saving
    thisExp = data.ExperimentHandler(name=expName, version='',
        extraInfo=expInfo, runtimeInfo=None,
        originPath=None,
        savePickle=True, saveWideText=True,
        dataFileName=filename)
    # save a log file for detail verbose info
    logFile = logging.LogFile(filename+'.log', level=logging.EXP)
    logging.console.setLevel(logging.WARNING)  # this outputs to the screen, not a file

    endExpNow = False  # flag for 'escape' or other condition => quit the exp

    # Start Code - component code to be run before the window creation

    # Setup the Window
    ## TODO: set window to fullscreen
    win = visual.Window(
        size=[1366, 768], fullscr=True, screen=0,
        allowGUI=True, allowStencil=False,
        monitor='testMonitor', color=[0,0,0], colorSpace='rgb',
        blendMode='avg', useFBO=True)
    # store frame rate of monitor if we can measure it
    expInfo['frameRate'] = win.getActualFrameRate()
    if expInfo['frameRate'] != None:
        frameDur = 1.0 / round(expInfo['frameRate'])
    else:
        frameDur = 1.0 / 60.0  # could not measure, so guess

    # Initialize components for Routine "trial"
    trialClock = core.Clock()
    stim_1 = sound.Sound('A', secs=-1)
    stim_1.setVolume(1)
    fix_1 = visual.TextStim(win=win, name='fix_1',
        text='+',
        font='Arial',
        pos=(0, 0), height=0.1, wrapWidth=None, ori=0,
        color='white', colorSpace='rgb', opacity=1,
        depth=-1.0);

    # Initialize components for Routine "run_start"
    run_startClock = core.Clock()
    run_start_msg_screen = visual.TextStim(win=win, name='run_start_msg_screen',
        text=u'Kurze Pause.',
        font='Arial',
        units='norm', pos=[0,0], height=0.12, wrapWidth=2, ori=0,
        color='white', colorSpace='rgb', opacity=1,
        depth=0.0);

    # Initialize components for Routine "run_trigger_sync"
    StartClock = core.Clock()
    run_trigger_syncClock = core.Clock()
    run_start_msg = visual.TextStim(win=win, name='run_start_msg',
        text='Durchgang beginnt!',
        font='Arial',
        units='norm', pos=[0, 0], height=0.15, wrapWidth=2, ori=0,
        color='white', colorSpace='rgb', opacity=1,
        depth=-1.0);
    movie = visual.MovieStim3(
        win=win, name='movie',units='pix',
        noAudio = True,
        # rename path
        filename=r'C:\Paradigmen\AG_Brain\Peer\TNAC\movies\mov3.mkv',  # raw string avoids invalid \-escapes
        ori=0, pos=(0, 0), opacity=1,
        depth=0.0,)

    # Create some handy timers
    globalClock = core.Clock()  # to track the time since experiment started
    routineTimer = core.CountdownTimer()  # to track time remaining of each (non-slip) routine


    block_delay = [4,5,6]*12
    shuffle(block_delay)  # 'random' was imported as numpy.random.random (a function); use the shuffle imported above
    #print(block_delay)
    # ------Prepare to start Routine "run_start"-------
    t = 0
    run_startClock.reset()  # clock
    frameN = -1
    continueRoutine = True
    # update component parameters for each repeat
    run_start_trigger_key = event.BuilderKeyResponse()
    # keep track of which components have finished
    run_startComponents = [run_start_msg_screen, run_start_trigger_key]
    for thisComponent in run_startComponents:
        if hasattr(thisComponent, 'status'):
            thisComponent.status = NOT_STARTED

    # -------Start Routine "run_start"-------
    while continueRoutine:
        # get current time
        t = run_startClock.getTime()
        thisExp.addData('start_run',globalClock.getTime())
        frameN = frameN + 1  # number of completed frames (so 0 is the first frame)
        # update/draw components on each frame

        # *run_start_msg_screen* updates
        if t >= 0.0 and run_start_msg_screen.status == NOT_STARTED:
            # keep track of start time/frame for later
            run_start_msg_screen.tStart = t
            run_start_msg_screen.frameNStart = frameN  # exact frame index
            run_start_msg_screen.setAutoDraw(True)

        # *run_start_trigger_key* updates
        if t >= 0.0 and run_start_trigger_key.status == NOT_STARTED:
            # keep track of start time/frame for later
            run_start_trigger_key.tStart = t
            run_start_trigger_key.frameNStart = frameN  # exact frame index
            run_start_trigger_key.status = STARTED
            # keyboard checking is just starting
            event.clearEvents(eventType='keyboard')
        if run_start_trigger_key.status == STARTED:
            theseKeys = event.getKeys(keyList=['s'])

            # check for quit:
            if "escape" in theseKeys:
                endExpNow = True
            if len(theseKeys) > 0:  # at least one key was pressed
                # a response ends the routine
                continueRoutine = False

        # check if all components have finished
        if not continueRoutine:  # a component has requested a forced-end of Routine
            break
        continueRoutine = False  # will revert to True if at least one component still running
        for thisComponent in run_startComponents:
            if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
                continueRoutine = True
                break  # at least one component has not yet finished

        # check for quit (the Esc key)
        if endExpNow or event.getKeys(keyList=["escape"]):
            core.quit()

        # refresh the screen
        if continueRoutine:  # don't flip if this routine is over or we'll get a blank screen
            win.flip()

    # -------Ending Routine "run_start"-------
    for thisComponent in run_startComponents:
        if hasattr(thisComponent, "setAutoDraw"):
            thisComponent.setAutoDraw(False)
    # the Routine "run_start" was not non-slip safe, so reset the non-slip timer
    routineTimer.reset()

    # ------Prepare to start Routine "run_trigger_sync"-------
    t = 0
    run_trigger_syncClock.reset()  # clock
    frameN = -1
    continueRoutine = True
    # update component parameters for each repeat
    run_trigger_sync_ = event.BuilderKeyResponse()

    # keep track of which components have finished
    run_trigger_syncComponents = [run_trigger_sync_, run_start_msg]
    for thisComponent in run_trigger_syncComponents:
        if hasattr(thisComponent, 'status'):
            thisComponent.status = NOT_STARTED

    # -------Start Routine "run_trigger_sync"-------
    while continueRoutine:
        # get current time
        print('waiting for scanner trigger....')
        t = run_trigger_syncClock.getTime()
        frameN = frameN + 1  # number of completed frames (so 0 is the first frame)
        # update/draw components on each frame

        # *run_trigger_sync_* updates
        if t >= 0.0 and run_trigger_sync_.status == NOT_STARTED:
            # keep track of start time/frame for later
            run_trigger_sync_.tStart = t
            run_trigger_sync_.frameNStart = frameN  # exact frame index
            run_trigger_sync_.status = STARTED
            # keyboard checking is just starting
            win.callOnFlip(run_trigger_sync_.clock.reset)  # t=0 on next screen flip
            event.clearEvents(eventType='keyboard')
        if run_trigger_sync_.status == STARTED:
            theseKeys = event.getKeys(keyList=['t'])

            # check for quit:
            if "escape" in theseKeys:
                endExpNow = True
            if len(theseKeys) > 0:  # at least one key was pressed
                run_trigger_sync_.keys = theseKeys[-1]  # just the last key pressed
                run_trigger_sync_.rt = run_trigger_sync_.clock.getTime()
                # a response ends the routine
                continueRoutine = False

        # *run_start_msg* updates
        if t >= 0.0 and run_start_msg.status == NOT_STARTED:
            # keep track of start time/frame for later
            run_start_msg.tStart = t
            run_start_msg.frameNStart = frameN  # exact frame index
            run_start_msg.setAutoDraw(True)
        frameRemains = 0.0 + 5 - win.monitorFramePeriod * 0.75  # most of one frame period left
        if run_start_msg.status == STARTED and t >= frameRemains:
            run_start_msg.setAutoDraw(False)

        # check if all components have finished
        if not continueRoutine:  # a component has requested a forced-end of Routine
            break
        continueRoutine = False  # will revert to True if at least one component still running
        for thisComponent in run_trigger_syncComponents:
            if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
                continueRoutine = True
                break  # at least one component has not yet finished

        # check for quit (the Esc key)
        if endExpNow or event.getKeys(keyList=["escape"]):
            core.quit()

        # refresh the screen
        if continueRoutine:  # don't flip if this routine is over or we'll get a blank screen
            win.flip()

    # -------Ending Routine "run_trigger_sync"-------
    for thisComponent in run_trigger_syncComponents:
        if hasattr(thisComponent, "setAutoDraw"):
            thisComponent.setAutoDraw(False)
    # check responses
    if run_trigger_sync_.keys in ['', [], None]:  # No response was made
        run_trigger_sync_.keys=None
    thisExp.addData('run_trigger_sync_.keys',run_trigger_sync_.keys)
    if run_trigger_sync_.keys != None:  # we had a response
        thisExp.addData('run_trigger_sync_.rt', run_trigger_sync_.rt)
    run_start_timestamp = StartClock.getTime()

    send_trigger(400)
    # the Routine "run_trigger_sync" was not non-slip safe, so reset the non-slip timer
    routineTimer.reset()

    start_delay=False
    delay_counter= 0
    #print(block_delay)
    #print(delay_counter)
    # start movie for whole run (loop over trials)
    mov='movies/mov3.mkv'
    #print(mov)
    movie.setMovie(mov)
    if t >= 0.0 and movie.status == NOT_STARTED:
        # keep track of start time/frame for later
        movie.tStart = t
        movie.frameNStart = frameN  # exact frame index
        movie.setAutoDraw(True)
        frameRemains = 0.0 + 2 - win.monitorFramePeriod * 0.75  # most of one frame period left

        # set up handler to look after randomisation of conditions etc
        trials = data.TrialHandler(nReps=1, method='sequential',
            extraInfo=expInfo, originPath=-1,
        trialList=data.importConditions(run_var),
        seed=None, name='trials')
        thisExp.addLoop(trials)  # add the loop to the experiment
        thisTrial = trials.trialList[0]  # so we can initialise stimuli with some values
        # abbreviate parameter names if possible (e.g. rgb = thisTrial.rgb)
        if thisTrial != None:
            for paramName in thisTrial.keys():
                exec(paramName + '= thisTrial.' + paramName)
        stimuli_played=0

        for thisTrial in trials:
            currentLoop = trials
            # abbreviate parameter names if possible (e.g. rgb = thisTrial.rgb)
            if thisTrial != None:
                for paramName in thisTrial.keys():
                    exec(paramName + '= thisTrial.' + paramName)

            # ------Prepare to start Routine "trial"-------
            t = 0
            trialClock.reset()  # clock
            frameN = -1
            continueRoutine = True

            routineTimer.add(2.000000)
            # update component parameters for each repeat
            stim_1.setSound(stimuli, secs=2)
            #read stimuli into dict and set port value
            abc= stimuli.split('/')[0]
            trg = trg_dict.get(abc, 100)
            # keep track of which components have finished
            trialComponents = [stim_1, fix_1]
            for thisComponent in trialComponents:
                if hasattr(thisComponent, 'status'):
                    thisComponent.status = NOT_STARTED

            # -------Start Routine "trial"-------
            while continueRoutine and routineTimer.getTime() > 0:


                # get current time
                t = trialClock.getTime()
                frameN = frameN + 1  # number of completed frames (so 0 is the first frame)
                # update/draw components on each frame
                # start/stop stim_1
                if t >= 0.0 and stim_1.status == NOT_STARTED:
                    # keep track of start time/frame for later
                    stim_1.tStart = t
                    stim_1.frameNStart = frameN  # exact frame index

            ## TODO reinstate: send_trigger(abc)
                    #print(abc)
                    stim_1.play()  # start the sound (it finishes automatically)
                    send_trigger(trg) # send block specific trigger
                    # get time for stimuls start
                    thisExp.addData('stimulus_start_global',globalClock.getTime())
                    thisExp.addData('stimulus_start_routineTimer',routineTimer.getTime())
                    thisExp.addData('stimulus_start_',frameN)
                    #print(stim_1)
                    stimuli_played+=1
                    if stimuli_played%5 == 0:
                        start_delay=True
                    print('stimuli_nr:'+str(stimuli_played))


                frameRemains = 0.0 + 1.5 - win.monitorFramePeriod * 0.75  # most of one frame period left
                #frameRemainsdelay = 0.0 + 1.5- win.monitorFramePeriod * 0.75  # most of one frame period left
                if stim_1.status == STARTED and t >= frameRemains:
                    stim_1.stop()  # stop the sound (if longer than duration)
                    send_trigger(100) # send sound off trigger
                    # get info on stim end
                thisExp.addData('stimulus_end_global',globalClock.getTime())
                thisExp.addData('stimulus_end_routineTimer',routineTimer.getTime())

                # add delay intervall after 5 stimuli
                if stimuli_played %5 == 0 and start_delay and delay_counter != 35:
                    send_trigger(200)
                    delay=block_delay[delay_counter]
                    routineTimer.add(block_delay[delay_counter])
                    #frameRemainsdelay = 0.0 + 1.5 + delay - win.monitorFramePeriod * 0.75  # most of one frame period left
                    #print('delay='+str(delay_counter))
                    delay_counter +=1
                    thisExp.addData('delay_counter',block_delay[delay_counter])
                    thisExp.addData('block_end_global',globalClock.getTime())
                    start_delay=False
                
                # check if all components have finished
                if not continueRoutine:  # a component has requested a forced-end of Routine
                    break
                continueRoutine = False  # will revert to True if at least one component still running
                for thisComponent in trialComponents:
                    if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
                        continueRoutine = True
                        break  # at least one component has not yet finished

                # check for quit (the Esc key)
                if endExpNow or event.getKeys(keyList=["escape"]):
                    core.quit()

                # refresh the screen
                if continueRoutine:  # don't flip if this routine is over or we'll get a blank screen
                    win.flip()




            stim_1.stop()  # ensure sound has stopped at end of routine
            thisExp.nextEntry()

            # completed 1 repeats of 'trials'

        thisExp.nextEntry()



        # completed 1 repeats of 'block'

    if stimuli_played == 180:
        movie.setAutoDraw(False)
        send_trigger(300) # END RUN
        thisExp.saveAsWideText(filename+'run3'+'.csv')
        thisExp.saveAsPickle(filename+'run3')
        logging.flush()
        # make sure everything is closed down
        thisExp.abort()  # or data files will save again on exit
        win.close()
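
send_trigger() is used throughout this experiment script but never defined in the excerpt; it presumably writes marker codes to EEG/MEG acquisition hardware. A minimal stand-in for dry runs might be:

def send_trigger(code):
    # placeholder: report the marker instead of writing it to the hardware port
    print('trigger %s' % code)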
Example #60
#!/usr/bin/python3
# vim:fileencoding=utf-8:sw=4:et

from __future__ import print_function, unicode_literals, absolute_import, division
import sys
import os
import io
import logging
import pathlib
import json

NATIVE = sys.getfilesystemencoding()


def parse_tvlist(lines):
    result = []
    separation = list("|,,;;")
    for line in lines:
        if line.startswith("#"): continue
        line = line.strip()
        if len(line) == 0: continue
        for sep in separation:
            cid, s, src = line.partition(sep)
            if s: break
        if s:
            result.append([cid.strip(), src.strip()])

    return result
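
parse_tvlist() accepts "channel-id <sep> source-url" lines, where the separator may be "|", a half- or full-width comma, or a half- or full-width semicolon, and "#" starts a comment. For example:

lines = [
    "# channel list",
    "cctv1 | http://example.com/cctv1.m3u8",
    "cctv2,http://example.com/cctv2.m3u8",
]
print(parse_tvlist(lines))
# -> [['cctv1', 'http://example.com/cctv1.m3u8'],
#     ['cctv2', 'http://example.com/cctv2.m3u8']]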


def setup_log(log_level=None):