Example #1
def decode_natural(compressed):
    bits = util.decompress(util.decompress(compressed))
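    # the header holds two 32-bit lengths: the bit length of the Y wavelet
    # stream and the bit length of each chroma (Cb/Cr) stream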

    wave_len = byte_to_int(bits[:32].tobytes())
    wave_small_len = byte_to_int(bits[32:64].tobytes())

    data = bits[64:]

    wp_Y = decode_wavelets(data[:wave_len])
    wp_Cb = decode_wavelets(data[wave_len:(wave_len + wave_small_len)])
    wp_Cr = decode_wavelets(
        data[(wave_len + wave_small_len):(wave_len + 2*wave_small_len)])

    Y = wp_Y.reconstruct()
    Cb = wp_Cb.reconstruct()
    Cr = wp_Cr.reconstruct()

    Cb = inter_resample(Cb, 1/2.0)
    Cr = inter_resample(Cr, 1/2.0)

    h = min(Y.shape[0], Cb.shape[0])
    w = min(Y.shape[1], Cb.shape[1])
    
    Y = Y[0:h, 0:w]
    Cb = Cb[0:h, 0:w]
    Cr = Cr[0:h, 0:w]
    
    ycbcr = np.dstack((Y, Cb, Cr))
    rgb = ycbcr_to_rgb(ycbcr)

    return rgb
Example #2
 def load_index_file(self):
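     # decompress each persisted index file to a temporary ".decompress"
     # sibling, unpickle it, then delete the temporary file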
     util.decompress(conf.word_set_path, conf.word_set_path + ".decompress")
     util.decompress(conf.word2id_map_path,
                     conf.word2id_map_path + ".decompress")
     util.decompress(conf.index_path, conf.index_path + ".decompress")
     util.decompress(conf.doc_length_path,
                     conf.doc_length_path + ".decompress")
     util.decompress(conf.D_path, conf.D_path + ".decompress")
     with open(conf.word_set_path + ".decompress", 'rb') as input_file:
         self.word_set = pickle.loads(input_file.read())
     with open(conf.word2id_map_path + ".decompress", 'rb') as input_file:
         self.word2id_map = pickle.loads(input_file.read())
     with open(conf.index_path + ".decompress", 'rb') as input_file:
         self.index = pickle.loads(input_file.read())
     with open(conf.doc_length_path + ".decompress", 'rb') as input_file:
         self.doc_length = pickle.loads(input_file.read())
     with open(conf.D_path + ".decompress", 'rb') as input_file:
         self.D = pickle.loads(input_file.read())
     self.W = len(self.word_set)
     os.remove(conf.word_set_path + ".decompress")
     os.remove(conf.word2id_map_path + ".decompress")
     os.remove(conf.index_path + ".decompress")
     os.remove(conf.doc_length_path + ".decompress")
     os.remove(conf.D_path + ".decompress")
     print("Load index from file successfully.")
Example #3
 def respond (self, path, header_only=False):
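     # serve the pre-compressed "<path>.huf" variant of the requested file,
     # decompressing straight into the response stream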
     if os.path.isdir(path):
         path = os.path.join(path, 'index.html')
     try:
         with open(path+'.huf', 'rb') as compressed:
             self.send_response(200)
             self.send_header('Content-type', self.guess_type(path))
             self.end_headers()
             if not header_only:
                 util.decompress(compressed, self.wfile)
     except OSError:
         self.send_error(404, "Not found: {}.huf".format(path))
Example #4
def pip_download_install():
    url = 'https://pypi.python.org/packages/source/p/pip/pip-6.0.8.tar.gz'
    target = 'pip-6.0.8.tar.gz'
    targetdir = 'pip-6.0.8'
    print('============ downloading ' + target + ' from: ' + url)
    util.download(url, target)
    print('============ extracting ' + target)
    util.decompress(target, '.')
    os.chdir(targetdir)
    print('============ installing pip')
    cmdResult = os.popen('python setup.py install').readlines()
    util.printCommandResult(cmdResult)
    print('============ installed, please add pip to your path')
Example #5
File: shuffle.py  Project: GoSteven/dpark
 def fetch_one(self, uri, shuffleId, part, reduceId):
     if uri == LocalFileShuffle.getServerUri():
         urlopen = file
         url = LocalFileShuffle.getOutputFile(shuffleId, part, reduceId)
     else:
         urlopen = urllib.urlopen
         url = "%s/%d/%d/%d" % (uri, shuffleId, part, reduceId)
     logger.debug("fetch %s", url)
     
     tries = 4
     while True:
         try:
             f = urlopen(url)
             d = f.read()
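             # payload layout: a 1-byte serializer flag ('m' marshal /
             # 'p' pickle), a 4-byte total length, then the compressed body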
             flag = d[:1]
             length, = struct.unpack("I", d[1:5])
             if length != len(d):
                 raise IOError("length not match: expected %d, but got %d" % (length, len(d)))
             d = decompress(d[5:])
             f.close()
             if flag == 'm':
                 d = marshal.loads(d)
             elif flag == 'p':
                 d = cPickle.loads(d)
             else:
                 raise ValueError("invalid flag")
             return d
         except Exception, e:
             logger.debug("Fetch failed for shuffle %d, reduce %d, %d, %s, %s, try again",
                     shuffleId, reduceId, part, url, e)
             tries -= 1
             if not tries:
                 logger.error("Fetch failed for shuffle %d, reduce %d, %d, %s, %s", 
                         shuffleId, reduceId, part, url, e)
                 raise
             time.sleep(2**(3-tries))
Example #6
def v0_fic_all(urlId: str) -> Any:
    fics = Fic.select({'urlId': urlId})
    if len(fics) != 1:
        return Err.urlId_not_found.get()
    fic = fics[0]
    if fic.chapterCount is None:
        print(f'err: fic has no chapter count: {fic.id}')
        return Err.urlId_not_found.get()
    ficChapters = {
        fc.chapterId: fc
        for fc in FicChapter.select({'ficId': fic.id})
    }
    chapters = {}
    for cid in range(1, fic.chapterCount + 1):
        if cid not in ficChapters:
            return Err.cid_not_found.get({'arg': f'{fic.id}/{cid}'})
        chapter = ficChapters[cid]
        cres = chapter.toJSONable()
        try:
            content = cres['content']
            if content is not None:
                content = util.decompress(content)
                content = scrape.decodeRequest(content, f'{fic.id}/{cid}')
                content = cleanHtml(content)
                if content != cleanHtml(content):
                    print(
                        f'v0_fic_all: {fic.id}/{cid} did not round-trip through cleanHtml'
                    )
            cres['content'] = content
            chapters[cid] = cres
        except:
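            # skip chapters whose stored content fails to decompress or decode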
            pass

    res = fic.toJSONable()
    return Err.ok({'info': res, 'chapters': chapters})
Example #7
 def create_chapter(self, chapter_file):
     """create chapter from json-file"""
     try:
         item = json.load(open(chapter_file, 'r'))
         xhtml_file = "chap_{}.xhtml".format(
             str(item['chapter_no']).zfill(4))
         if not item["body"]:
             self.logger.error("body is empty (file: %s)", chapter_file)
             return epub.EpubHtml()
         # decompress
         decompressed = util.decompress(item["body"])
         if util.isbase64(decompressed):
             self.logger.error(
                 "still base64 encoded body after decompressing? (file: %s)",
                 chapter_file)
             return epub.EpubHtml()
         body = self.clean_body(decompressed,
                                item['chapter_title']) or decompressed
         chapter = epub.EpubHtml(lang='en',
                                 file_name=xhtml_file,
                                 uid=str(item['chapter_no']),
                                 content=body,
                                 title=item['chapter_title'])
         chapter.add_link(href="../Styles/ChapterStyle.css",
                          rel='stylesheet',
                          type='text/css')
         return chapter
     except json.decoder.JSONDecodeError as e:
         self.logger.error("file:%s\nmsg:%s" % (chapter_file, e))
         return epub.EpubHtml()
Example #8
 def recvBroadcast(self):
     d = decompress(open(self.path, 'rb').read())
     try:
         self.value = marshal.loads(d)
     except Exception:
         self.value = cPickle.loads(d)
     logger.debug("load from %s", self.path)
Example #9
def install_talib_for_linux():
    url = 'http://downloads.sourceforge.net/project/ta-lib/ta-lib/0.4.0/ta-lib-0.4.0-src.tar.gz'
    target = 'ta-lib-0.4.0-src.tar.gz'
    util.download(url, target)
    util.decompress(target, '.')
    os.chdir('ta-lib')
    print('==========configure ta-lib============')
    result = os.popen('./configure').readlines()
    util.printCommandResult(result)
    print('==========configure end   ============')
    print('==========make ta-lib ================')
    result = os.popen('make').readlines()
    util.printCommandResult(result)
    print('==========make ta-lib end ============')
    print('==========make install ta-lib =======')
    result = os.popen('make install').readlines()
    util.printCommandResult(result)
    print('==========make install ta-lib end =======')
Example #10
File: schedule.py  Project: guibog/dpark
    def statusUpdate(self, driver, status):
        tid = status.task_id.value
        state = status.state
        logger.debug("status update: %s %s", tid, state)

        jid = self.taskIdToJobId.get(tid)
        if jid not in self.activeJobs:
            logger.debug(
                "Ignoring update from TID %s because its job is gone", tid)
            return

        job = self.activeJobs[jid]
        _, task_id, tried = map(int, tid.split(':'))
        if state == mesos_pb2.TASK_RUNNING:
            return job.statusUpdate(task_id, tried, state)

        del self.taskIdToJobId[tid]
        self.jobTasks[jid].remove(tid)
        slave_id = self.taskIdToSlaveId[tid]
        if slave_id in self.slaveTasks:
            self.slaveTasks[slave_id] -= 1
        del self.taskIdToSlaveId[tid]

        if state in (mesos_pb2.TASK_FINISHED,
                     mesos_pb2.TASK_FAILED) and status.data:
            try:
                task_id, reason, result, accUpdate = cPickle.loads(status.data)
                if result:
                    flag, data = result
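                    # flag >= 2: the payload was spilled to a URL and must be
                    # fetched first; the remaining flag then selects
                    # marshal (0) or pickle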
                    if flag >= 2:
                        try:
                            data = urllib.urlopen(data).read()
                        except IOError:
                            # try again
                            data = urllib.urlopen(data).read()
                        flag -= 2
                    data = decompress(data)
                    if flag == 0:
                        result = marshal.loads(data)
                    else:
                        result = cPickle.loads(data)
                return job.statusUpdate(task_id, tried, state, reason, result,
                                        accUpdate)
            except Exception, e:
                logger.warning("error when cPickle.loads(): %s, data:%s", e,
                               len(status.data))
                state = mesos_pb2.TASK_FAILED
                return job.statusUpdate(task_id, tried, mesos_pb2.TASK_FAILED,
                                        'load failed: %s' % e)
Example #11
def decode_cartoon(compressed):
    uncompressed = util.decompress(compressed)
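    # 128-bit header: height, width, and the low/high quantization bounds,
    # each stored in a 32-bit field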
    height = byte_to_int(uncompressed[:32].tobytes())
    width = byte_to_int(uncompressed[32:64].tobytes())
    low = byte_to_float(uncompressed[64:96].tobytes())
    high = byte_to_float(uncompressed[96:128].tobytes())

    ycrcb = np.array(bytearray(uncompressed[128:]))
    ycrcb = unquantize(ycrcb, low, high)
    ycrcb = ycrcb.reshape((height, width, 3))
    
    rgb = ycbcr_to_rgb(ycrcb)
    
    return rgb
Example #12
File: tasks.py  Project: FBK-WED/wed-pipe
def _handle_recursion(scheduler, file_meta):
    """Handle archive dataset recursion."""
    loggy = local.logger
    wf_input_params = []
    wf_exec_results = []

    # Decompress archive
    archive = file_meta['out_file']
    loggy.info('Handing recursion on archive [%s]', archive)
    expanded_archive = decompress(file_meta)
    loggy.info('Archive expanded into [%s] dir', expanded_archive)

    # Iterate archive content
    for path, dummy, files in os.walk(expanded_archive):
        for name in files:
            file_ = os.path.join(path, name)
            if not os.path.isfile(file_):
                continue

            # skip hidden (or weird) files
            if name.startswith(('.', '__')):
                continue

            # Apply dispatcher for every content file
            new_file_meta = file_meta_from_file(file_)
            new_file_meta.update(
                {'file_path': file_[len(expanded_archive):]})
            loggy.info(
                'Processing archive file [%s] with metadata %s',
                file_, new_file_meta
            )
            try:
                wfin, wfout = evaluate_dispatcher_and_run_workflow(
                    scheduler, new_file_meta
                )
            except:
                loggy.exception('Error while processing file [%s]', file_)
                raise
            else:
                wf_input_params += wfin
                wf_exec_results += wfout
                loggy.info('File [%s] completed', file_)

    return wf_input_params, wf_exec_results
Example #13
def getMostRecentScrapeWithMeta(
        url: str,
        ulike: Optional[str] = None,
        status: Optional[int] = 200,
        beforeId: Optional[int] = None) -> Optional[ScrapeMeta]:
    conn = openMinerva()
    curs = conn.cursor()
    stmt = 'select id, created, url, response, status from web where '
    clauses: List[str] = []
    whereData: List[Any] = []
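    # assemble the WHERE clause from whichever filters were supplied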
    if status is not None:
        clauses += ['status = %s']
        whereData += [status]
    if beforeId is not None:
        clauses += ['id <= %s']
        whereData += [beforeId]

    if ulike is None:
        clauses += ['url = %s']
        whereData += [url]
    else:
        clauses += ['url like %s']
        whereData += [ulike]

    stmt += ' and '.join(clauses)
    stmt += ' order by id desc'

    curs.execute(stmt, tuple(whereData))
    res = curs.fetchone()

    curs.close()
    if res is None:
        return None

    response = res[3]
    if response is not None:
        response = util.decompress(response.tobytes()).decode('utf-8')

    return {
        'url': res[2],
        'fetched': res[1],
        'raw': response,
        'status': res[4]
    }
Example #14
 def html(self) -> Optional[str]:
     if self.content is None:
         return None
     return str(util.decompress(self.content), 'utf-8')
Example #15
def run_decompressor(filename):
    with open(filename, 'rb') as compressed:
        with open(filename + '.decomp', 'wb') as uncompressed:
            util.decompress(compressed, uncompressed)
Example #16
 def unBlockifyObject(self, blocks):
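     # join the block payloads, decompress, and try marshal first, falling
     # back to pickle for objects marshal cannot load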
     s = decompress(''.join(b.data for b in blocks))
     try:
         return marshal.loads(s)
     except Exception:
         return cPickle.loads(s)