def get(lang): # str -> bool
  """Download the dictionary for lang into TARGET_DIR.

  Fetches to a temporary path first, then moves it into place only when
  the file is plausibly large enough.  Returns True on success.
  """
  fname = DICS[lang]
  url = DIC_URL % fname
  threshold = MIN_DIC_SIZE
  tmppath = TMP_DIR + '/' + fname
  destpath = TARGET_DIR + '/' + fname
  dprint("enter: url = %s, minsize = %s" % (url, threshold))
  from sakurakit import skfileio, sknetio
  ok = False
  with SkProfiler("fetch"):
    # gzip=True extracts the gzip stream on the fly;
    # flush=False trades memory for fewer disk writes
    fetched = sknetio.getfile(url, tmppath, flush=False, gzip=True)
    if fetched:
      # no exact size is known; sanity-check a lower bound
      ok = skfileio.filesize(tmppath) > threshold
  if not ok:
    if os.path.exists(tmppath):
      skfileio.removefile(tmppath)
  else:
    os.renames(tmppath, destpath) # also creates missing parent dirs
  dprint("leave: ok = %s" % ok)
  return ok
def get(): # -> bool
  """Download the AppLocale installer into INST_DIR.

  Skips the download when a file of the expected size is already in
  place.  Returns True on success.
  """
  dest = INST_DIR + '/' + APPLOC_FILENAME
  tmp = TMP_DIR + '/' + APPLOC_FILENAME
  url = APPLOC_URL
  expected = APPLOC_FILESIZE
  dprint("enter: size = %s, url = %s" % (expected, url))
  from sakurakit import skfileio
  # Fast path: an exact-size file already exists at the destination
  if os.path.exists(dest) and skfileio.filesize(dest) == expected:
    dprint("leave: already downloaded")
    return True
  from sakurakit import sknetio
  ok = False
  with SkProfiler("fetch"):
    # flush=False trades memory for fewer disk writes
    fetched = sknetio.getfile(url, tmp, flush=False)
    if fetched:
      ok = skfileio.filesize(tmp) == expected
      if ok:
        os.rename(tmp, dest)
  if not ok and os.path.exists(tmp):
    skfileio.removefile(tmp) # drop the partial/bad download
  dprint("leave: ok = %s" % ok)
  return ok
def get(): # -> bool url = DIC_URL minsize = MIN_DIC_SIZE path = TMP_DIR + '/' + DIC_FILENAME path_compressed = path + '.gz' dprint("enter: url = %s, minsize = %s" % (url, minsize)) #from sakurakit import skfileio #if os.path.exists(path) and skfileio.filesize(path) == size: # dprint("leave: already downloaded") # return True ok = False import gzip from sakurakit import skfileio, sknetio with SkProfiler("fetch"): # gzip=True to automatically extract gzip # flush=false to use more memory to reduce disk access if sknetio.getfile(url, path_compressed, flush=False, gzip=False): # Note: gzip=True does not extract gzip, it decompresses the header ... probs? >_< with gzip.open(path_compressed, 'rb') as f_in, open(path, 'wb') as f_out: f_content = f_in.read() f_out.write(f_content) ok = skfileio.filesize(path) > minsize if ok: skfileio.removefile(path_compressed) elif os.path.exists(path): skfileio.removefile(path) skfileio.removefile(path_compressed) # if not ok and os.path.exists(path): # skfileio.removefile(path) dprint("leave: ok = %s" % ok) return ok
def get(dic): # str -> bool
  """Download the dictionary described by DICS[dic] into TMP_DIR.

  Returns True iff the downloaded file matches the recorded size.
  """
  meta = DICS[dic]
  url = DIC_URL + meta['file']
  path = TMP_DIR + '/' + meta['file']
  dprint("enter: dic = %s, url = %s" % (dic, url))
  from sakurakit import skfileio, sknetio
  ok = False
  with SkProfiler("fetch"):
    # flush=False trades memory for fewer disk writes
    fetched = sknetio.getfile(url, path, flush=False)
    if fetched:
      ok = skfileio.filesize(path) == meta['size']
  if not ok and os.path.exists(path):
    skfileio.removefile(path) # drop the partial/bad download
  dprint("leave: ok = %s" % ok)
  return ok
def get(family): # str -> bool
  """Download the font for the given family into TMP_DIR.

  Returns True iff the downloaded file matches the recorded size.
  """
  spec = FONTS[family]
  url = spec['dl']
  path = TMP_DIR + '/font-%s.%s' % (family, spec['format'])
  dprint("enter: family = %s, url = %s" % (family, url))
  from sakurakit import skfileio, sknetio
  ok = False
  with SkProfiler("fetch"):
    # flush=False trades memory for fewer disk writes
    fetched = sknetio.getfile(url, path, flush=False)
    if fetched:
      ok = skfileio.filesize(path) == spec['size']
  if not ok and os.path.exists(path):
    skfileio.removefile(path) # drop the partial/bad download
  dprint("leave: ok = %s" % ok)
  return ok
def get(): # return bool
  """Download the UniDic archive into TMP_DIR.

  Skips the download when an archive of the expected size already
  exists.  Returns True on success.
  """
  url = UNIDIC_URL
  archive = TMP_DIR + '/' + UNIDIC_FILENAME + UNIDIC_SUFFIX
  expected = UNIDIC_FILESIZE
  dprint("enter: size = %s, url = %s" % (expected, url))
  from sakurakit import skfileio
  # Fast path: an exact-size archive is already there
  if os.path.exists(archive) and skfileio.filesize(archive) == expected:
    dprint("leave: already downloaded")
    return True
  from sakurakit import sknetio
  ok = False
  with SkProfiler("fetch"):
    # flush=False trades memory for fewer disk writes
    fetched = sknetio.getfile(url, archive, flush=False)
    if fetched:
      ok = skfileio.filesize(archive) == expected
  if not ok and os.path.exists(archive):
    skfileio.removefile(archive) # drop the partial/bad download
  dprint("leave: ok = %s" % ok)
  return ok
def get(): # -> bool
  """Download the dictionary into TMP_DIR, extracting the gzip stream.

  Returns True iff the result is plausibly large enough.
  """
  url = DIC_URL
  threshold = MIN_DIC_SIZE
  path = TMP_DIR + '/' + DIC_FILENAME
  dprint("enter: url = %s, minsize = %s" % (url, threshold))
  from sakurakit import skfileio, sknetio
  ok = False
  with SkProfiler("fetch"):
    # gzip=True extracts the gzip stream on the fly;
    # flush=False trades memory for fewer disk writes
    fetched = sknetio.getfile(url, path, flush=False, gzip=True)
    if fetched:
      # no exact size is known; sanity-check a lower bound
      ok = skfileio.filesize(path) > threshold
  if not ok and os.path.exists(path):
    skfileio.removefile(path) # drop the partial/bad download
  dprint("leave: ok = %s" % ok)
  return ok
def get(lang): # str -> bool
  """Download the dictionary file for lang into TMP_DIR.

  Returns True iff the downloaded file is plausibly large enough.
  """
  url = URL + DICS[lang]['file']
  path = TMP_DIR + '/' + FILENAME_TPL % lang
  # single enter trace (previously "enter" was logged twice)
  dprint("enter: lang = %s, url = %s" % (lang, url))
  ok = False
  from sakurakit import skfileio, sknetio
  with SkProfiler("fetch"):
    # flush=False to use more memory to reduce disk access
    if sknetio.getfile(url, path, flush=False):
      # no exact size is known; sanity-check a lower bound
      ok = skfileio.filesize(path) > MIN_FILESIZE
  if not ok and os.path.exists(path):
    skfileio.removefile(path) # drop the partial/bad download
  dprint("leave: ok = %s" % ok)
  return ok
def get(dic): # str -> bool
  """Download the dictionary dic into TMP_DIR.

  Returns True iff the downloaded file is plausibly large enough.
  """
  url = URL_TPL % dic
  threshold = MIN_FILESIZE
  outpath = TMP_DIR + '/' + FILENAME_TPL % dic
  dprint("enter: url = %s, minsize = %s" % (url, threshold))
  from sakurakit import skfileio, sknetio
  ok = False
  with SkProfiler("fetch"):
    # flush=False trades memory for fewer disk writes
    fetched = sknetio.getfile(url, outpath, flush=False)
    if fetched:
      # no exact size is known; sanity-check a lower bound
      ok = skfileio.filesize(outpath) > threshold
  if not ok and os.path.exists(outpath):
    skfileio.removefile(outpath) # drop the partial/bad download
  dprint("leave: ok = %s" % ok)
  return ok
def getld(lang): # str -> bool
  """Download the Lingoes .ld2 dictionary for lang into LD_DIR.

  Skips the download when a file of the recorded size already exists.
  Returns True on success.
  """
  meta = DICS[lang]
  url = meta.get('url') or "http://%s/pub/lingoes/%s.ld2" % (initdefs.DOMAIN_ORG, lang)
  expected = meta['size']
  path = LD_DIR + '/' + lang + LD_SUFFIX
  dprint("enter: lang = %s, size = %s" % (lang, expected))
  from sakurakit import skfileio
  # Fast path: an exact-size file is already there
  if os.path.exists(path) and skfileio.filesize(path) == expected:
    dprint("leave: already downloaded")
    return True
  from sakurakit import sknetio
  ok = False
  with SkProfiler("fetch"):
    # flush=False trades memory for fewer disk writes
    fetched = sknetio.getfile(url, path, flush=False)
    if fetched:
      ok = skfileio.filesize(path) == expected
  if not ok and os.path.exists(path):
    skfileio.removefile(path) # drop the partial/bad download
  dprint("leave: ok = %s" % ok)
  return ok
# NOTE(review): this chunk begins inside a function whose def header is
# outside the visible source (presumably an image fetcher taking url and
# path); the indentation below is reconstructed — confirm against the
# enclosing definition.
  dprint(url)
  if url.startswith('file:///'):
    # Local file URL: copy the file instead of downloading it
    url = url.replace('file:///', '')
    import shutil
    try:
      shutil.copy(url, path)
    except Exception, e:
      dwarn(e) # best-effort: log and continue on copy failure
  else:
    # presumably this host rejects redirected requests — verify
    noredirects = url.startswith("http://pics.dmm.co.jp/") #or url.startswith("http://media.erogetrailers.com/img/")
    from sakurakit import sknetio
    #skthreads.runasync(partial(sknetio.getfile, url, path))
    # Only accept image MIME types for the downloaded content
    sknetio.getfile(url, path, allow_redirects=not noredirects, mimefilter=sknetio.IMAGE_MIME_FILTER)

def _getimages(l, path=None):
  """Download every image in l, then optionally reveal path on disk.

  @param  l  [(url, path)]
  @param* path  unicode  directory to open in the file manager afterwards
  """
  for args in l:
    _getimage(*args)
  if path:
    osutil.open_location(path)

class GameCoffeeBean(QObject):