Example #1
def calculate_additional_texts_similarity():
    for essay_num in range(1,10+1):
        # opens additional texts
        print essay_num

        additional_texts = []
        filelist = os.listdir("data/external/additional_texts/essay_%d" % (essay_num,))
        for filename in filelist:
            text = open("data/external/additional_texts/essay_%d/%s" % (essay_num,filename)).read()
            additional_texts.append(text)

        if len(additional_texts) > 0:
            output = open("features/essay_%d/additional_text_similarity" % (essay_num,), "w")
            for ind, this_essay in enumerate(essays.get_essay_set(essay_num)):
                essay_text = this_essay.get_text("proc")
                random_text = word_generator(len(essay_text))
                similarities = []
                for additional_text in additional_texts:
                    key_info = zlib.compress(additional_text,9)
                    key_plus_essay_text = zlib.compress(additional_text + essay_text,9)
                    key_plus_random_text = zlib.compress(additional_text + random_text,9)
                    # float() so the similarity ratio is not truncated by Python 2 integer division
                    similarities.append(float(len(key_plus_essay_text) - len(key_info)) / (len(key_plus_random_text) - len(key_info)))
                output.write("%s\n" % (",".join([str(k) for k in similarities])))
                if ind % 100 == 0:
                    print ".",
            print
            output.close()
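The ratio written out above is a conditional-compression similarity: it measures how many extra compressed bytes the essay adds on top of the reference text, scaled by what incompressible random text of the same length would add. A minimal, self-contained Python 3 sketch of the same idea (the function name is illustrative, not from the original project):

import os
import zlib

def conditional_compression_similarity(reference, candidate):
    # How many compressed bytes `candidate` adds on top of `reference`,
    # scaled by what incompressible random bytes of the same length add.
    # Values near 0 mean the candidate is highly redundant with the reference.
    baseline = os.urandom(len(candidate))
    ref_size = len(zlib.compress(reference, 9))
    with_candidate = len(zlib.compress(reference + candidate, 9))
    with_baseline = len(zlib.compress(reference + baseline, 9))
    return float(with_candidate - ref_size) / (with_baseline - ref_size)

ref = b"the quick brown fox jumps over the lazy dog " * 20
print(conditional_compression_similarity(ref, ref[:100]))        # near 0: redundant
print(conditional_compression_similarity(ref, os.urandom(100)))  # near 1: novel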
Example #2
    def test_bad_itxt(self):

        im = load(HEAD + chunk(b'iTXt') + TAIL)
        self.assertEqual(im.info, {})

        im = load(HEAD + chunk(b'iTXt', b'spam') + TAIL)
        self.assertEqual(im.info, {})

        im = load(HEAD + chunk(b'iTXt', b'spam\0') + TAIL)
        self.assertEqual(im.info, {})

        im = load(HEAD + chunk(b'iTXt', b'spam\0\x02') + TAIL)
        self.assertEqual(im.info, {})

        im = load(HEAD + chunk(b'iTXt', b'spam\0\0\0foo\0') + TAIL)
        self.assertEqual(im.info, {})

        im = load(HEAD + chunk(b'iTXt', b'spam\0\0\0en\0Spam\0egg') + TAIL)
        self.assertEqual(im.info, {"spam": "egg"})
        self.assertEqual(im.info["spam"].lang, "en")
        self.assertEqual(im.info["spam"].tkey, "Spam")

        im = load(HEAD + chunk(b'iTXt', b'spam\0\1\0en\0Spam\0' + zlib.compress(b"egg")[:1]) + TAIL)
        self.assertEqual(im.info, {})

        im = load(HEAD + chunk(b'iTXt', b'spam\0\1\1en\0Spam\0' + zlib.compress(b"egg")) + TAIL)
        self.assertEqual(im.info, {})

        im = load(HEAD + chunk(b'iTXt', b'spam\0\1\0en\0Spam\0' + zlib.compress(b"egg")) + TAIL)
        self.assertEqual(im.info, {"spam": "egg"})
        self.assertEqual(im.info["spam"].lang, "en")
        self.assertEqual(im.info["spam"].tkey, "Spam")
Example #3
 def test_download_two_files(self):
   # Test downloading two files.
   actual = {}
   def out(key, generator):
     actual[key] = ''.join(generator)
   self.mock(isolateserver, 'file_write', out)
   server = 'http://example.com'
   self._requests = [
     (
       server + '/content-gs/retrieve/default-gzip/sha-1',
       {'read_timeout': 60, 'retry_404': True, 'headers': None},
       zlib.compress('Coucou'),
       None,
     ),
     (
       server + '/content-gs/retrieve/default-gzip/sha-2',
       {'read_timeout': 60, 'retry_404': True, 'headers': None},
       zlib.compress('Bye Bye'),
       None,
     ),
   ]
   cmd = [
     'download',
     '--isolate-server', server,
     '--target', ROOT_DIR,
     '--file', 'sha-1', 'path/to/a',
     '--file', 'sha-2', 'path/to/b',
   ]
   self.assertEqual(0, isolateserver.main(cmd))
   expected = {
     os.path.join(ROOT_DIR, 'path/to/a'): 'Coucou',
     os.path.join(ROOT_DIR, 'path/to/b'): 'Bye Bye',
   }
   self.assertEqual(expected, actual)
Example #4
def calculate_description_similarities():
    for essay_num in range(1,10+1):
        essay_description = open("data/descriptions/essay_%d_description.txt" % (essay_num,)).read()
        description_infovalue = len(zlib.compress(essay_description,9))
        print essay_num
        output = open("features/essay_%d/description_similarity" % (essay_num,),"w")
        for essay in essays.get_essay_set(essay_num):
            
            essay_text = essay.get_text("proc")
            random_text = word_generator(len(essay_text))
            if len(essay_text) > 0:
                essay_infovalue = len(zlib.compress(essay_description + essay_text,9))
                essay_infovalue_dummy = len(zlib.compress(essay_description + random_text,9))

                essay_infovalue_length_raw = essay_infovalue - description_infovalue
                # float() so the normalized features are not truncated by Python 2 integer division
                essay_infovalue_length_norm = float(essay_infovalue - description_infovalue) / len(essay_text)

                if (description_infovalue - essay_infovalue_dummy) != 0:
                    essay_infovalue_length_norm2 = float(description_infovalue - essay_infovalue) / (description_infovalue - essay_infovalue_dummy)
                else:
                    essay_infovalue_length_norm2 = 0
            else:
                essay_infovalue_length_raw = -1
                essay_infovalue_length_norm = -1
                essay_infovalue_length_norm2 = -1
                
            output.write("%.6f,%.6f,%.6f\n" % (essay_infovalue_length_raw, essay_infovalue_length_norm, essay_infovalue_length_norm2))
        output.close()
Example #5
    def add(self, entry):
        if self.os is None:
            import os

            self.os = os
        nm = entry[0]
        pth = entry[1]
        base, ext = self.os.path.splitext(self.os.path.basename(pth))
        ispkg = base == "__init__"
        try:
            txt = open(pth[:-1], "r").read() + "\n"
        except (IOError, OSError):
            try:
                f = open(pth, "rb")
                f.seek(8)  # skip magic and timestamp
                bytecode = f.read()
                marshal.loads(bytecode).co_filename  # to make sure it's valid
                obj = zlib.compress(bytecode, self.LEVEL)
            except (IOError, ValueError, EOFError, AttributeError):
                raise ValueError("bad bytecode in %s and no source" % pth)
        else:
            txt = iu._string_replace(txt, "\r\n", "\n")
            try:
                co = compile(txt, "%s/%s" % (self.path, nm), "exec")
            except SyntaxError, e:
                print "Syntax error in", pth[:-1]
                print e.args
                raise
            obj = zlib.compress(marshal.dumps(co), self.LEVEL)
Example #6
    def post(self):
        args = self.request.arguments
        retval = {"query": None, "zip": None}
        if "code" in args:
            code = "".join(args["code"])
            language = "".join(args.get("language", ["sage"]))
        else:
            self.send_error(400)
            return
        interacts = "".join(args.get("interacts", ["[]"]))
        import zlib, base64
        retval["zip"] = base64.urlsafe_b64encode(zlib.compress(code))
        retval["query"] = yield gen.Task(self.application.db.new_exec_msg,
            code.decode("utf8"), language, interacts.decode("utf8"))
        if "interacts" in args:
            retval["interacts"] = base64.urlsafe_b64encode(zlib.compress(interacts))
        if "n" in args:
            retval["n"] = int("".join(args["n"]))
        if "frame" not in args:
            self.set_header("Access-Control-Allow-Origin", self.request.headers.get("Origin", "*"))
            self.set_header("Access-Control-Allow-Credentials", "true")
        else:
            retval = '<script>parent.postMessage(%r,"*");</script>' % (json.dumps(retval),)
            self.set_header("Content-Type", "text/html")

        self.write(retval)
        self.finish()
Example #7
 def add(self, entry):
     if self.os is None:
         import os
         self.os = os
     nm = entry[0]
     pth = entry[1]
     base, ext = self.os.path.splitext(self.os.path.basename(pth))
     ispkg = base == '__init__'
     try:
         txt = open(pth[:-1], 'rU').read() + '\n'
     except (IOError, OSError):
         try:
             f = open(pth, 'rb')
             f.seek(8)  # skip magic and timestamp
             bytecode = f.read()
             marshal.loads(bytecode).co_filename  # to make sure it's valid
             obj = zlib.compress(bytecode, self.LEVEL)
         except (IOError, ValueError, EOFError, AttributeError):
             raise ValueError("bad bytecode in %s and no source" % pth)
     else:
         txt = txt.replace('\r\n', '\n')
         try:
             co = compile(txt, self.os.path.join(self.path, nm), 'exec')
         except SyntaxError, e:
             print "Syntax error in", pth[:-1]
             print e.args
             raise
         obj = zlib.compress(marshal.dumps(co), self.LEVEL)
Example #8
    def update_cache_for_url(self, urlobj, filename, urldata, contentlen, lastmodified, tag):
        """ Method to update the cache information for the URL 'url'
        associated to file 'filename' on the disk """

        # if page caching is disabled, skip this...
        if not objects.config.pagecache:
            return
        
        url = urlobj.get_full_url()
        if urldata:
            csum = sha.new(urldata).hexdigest()
        else:
            csum = ''
            
        # Update all cache keys
        content = self.cache._url[url]
        if content:
            rec = content[0]
            self.cache.update(rec, checksum=csum, location=filename,
                              content_length=contentlen, last_modified=lastmodified,
                              etag=tag, updated=True)
            if self._cfg.datacache:
                self.cache.update(rec, data=zlib.compress(urldata))
        else:
            # Insert as new values
            if self._cfg.datacache:
                self.cache.insert(url=url, checksum=csum, location=filename,
                                  content_length=contentlen, last_modified=lastmodified,
                                  etag=tag, updated=True, data=zlib.compress(urldata))
            else:
                self.cache.insert(url=url, checksum=csum, location=filename,
                                  content_length=contentlen, last_modified=lastmodified,
                                  etag=tag, updated=True)
Example #9
    def put(self, entity, tag=None):
        is_update = False
        entity['updated'] = time.time()
        entity_id = None

        entity_copy = entity.copy()

        # get the entity_id (or create a new one)
        entity_id = entity_copy.pop('id', None)
        if entity_id is None:
            entity_id = raw_guid()
        else:
            is_update = True
            if len(entity_id) != 16:
                if sys.version_info[0] == 2:
                    entity_id = entity_id.decode('hex')
                else:
                    entity_id = codecs.decode(entity_id, "hex_codec")
        body = simplejson.dumps(entity_copy)
        if self.use_zlib:
            if sys.version_info[0] == 2:
                body = zlib.compress(body, 1)
            else:
                body = zlib.compress(to_bytes(body), 1)

        if is_update:
            self._put_update(entity_id, entity_copy, body)
            return entity
        else:
            return self._put_new(entity_id, entity_copy, tag, body)
Example #10
def _stop_recording():
    """Get the current recorder and log the profiling data."""
    rec = recorder_proxy.get_for_current_request()
    if rec is not None:
        # update _local_cache if this is the first time or it's been 10 min
        if _local_cache.get('last_check', 0) + 600 < int(time.time()):
            _local_cache['use_plaintext'] = bool(memcache.get('profile-plaintext'))
            _local_cache['last_check'] = int(time.time())

        profile_data = rec.get_profile_data()

        calls = _split_profile(profile_data['calls'],
                               100 if _local_cache['use_plaintext'] else 800)

        profile_data['calls'] = calls.pop(0)['calls'] if calls else []

        logging.info("PROFILE: %s",
                     profile_data
                     if _local_cache['use_plaintext'] else
                     base64.b64encode(zlib.compress(json.dumps(profile_data))))

        for more_calls in calls:
            logging.info("PROFILE: %s",
                         more_calls
                         if _local_cache['use_plaintext'] else
                         base64.b64encode(
                             zlib.compress(json.dumps(more_calls))))

    recorder_proxy.clear_for_current_request()
Example #11
def main(ip_address, ssn):
    udp_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # Send HI first (zlib format, despite the "gzip" variable names)
    gzip_encoded_hi = zlib.compress("HI")
    udp_sock.sendto(gzip_encoded_hi, (ip_address, THE_PROTOCOL_PORT))

    # Get HI back
    data, addr = udp_sock.recvfrom(len(gzip_encoded_hi))
    if data != gzip_encoded_hi:
        print "Didn't get proper HI back."

    gzip_encoded_ssn = zlib.compress(ssn)
    print "Len: {}".format(len(gzip_encoded_ssn))

    print "Sending {}".format(gzip_encoded_ssn)
    udp_sock.sendto(gzip_encoded_ssn, (ip_address, THE_PROTOCOL_PORT))

    # Now we receive the file; wait up to 5000 seconds for it to arrive
    readable, _, _ = select.select([udp_sock], [], [], 5000)
    buf = array.array('i', [0])  # FIONREAD writes the number of readable bytes here
    fcntl.ioctl(readable[0].fileno(), termios.FIONREAD, buf, True)

    if buf[0] <= 0:
        print "Nothing on the other side."
        return

    full_file, addr = udp_sock.recvfrom(buf[0])
    gzip_decoded_file = zlib.decompress(full_file)
    print "FILE IS: {}".format(gzip_decoded_file)
Example #12
def borrar_cumple():
    try:
        f = open("calendario.txt","r")  # open the file in read mode
    except IOError:
        print "no se encontro el archivo calendario.txt"
    else:  # the else clause runs only if no exception was raised
        print "Ingrese el nombre y fecha del cumple a borrar"
        fecha = zlib.compress(encriptar(raw_input("Fecha:")))
        nomb = zlib.compress(encriptar(raw_input("Nombre:")))
        dic = pickle.load(f)
        f.close()  # close the file
        if fecha in dic:
            i = 0
            while i < len(dic[fecha]) and dic[fecha][i][0] != nomb:
                i = i + 1
            if i < len(dic[fecha]):
                del dic[fecha][i]
                if dic[fecha] == []:
                    del dic[fecha]
                f = open("calendario.txt","w")  # reopen the file in write mode
                pickle.dump(dic, f)
                f.close()  # close the file
            else:
                print "no se encontro el nombre " + zlib.decompress(desencriptar(nomb)) + " en la fecha " + zlib.decompress(desencriptar(fecha))
        else:
            print "no se ha encontrado ningun cumple en la fecha " + zlib.decompress(desencriptar(fecha))
Example #13
File: utiltest.py Project: pombr/conary
    def testDecompressStream(self):
        data = os.urandom(16 * 1024)
        compressed = zlib.compress(data)
        fp = StringIO.StringIO(compressed)
        dfo = util.decompressStream(fp)
        check = dfo.read()
        self.assertEqual(check, data)
        fp = StringIO.StringIO(compressed)
        dfo = util.decompressStream(fp)
        chunk = dfo.read(333)
        self.assertEqual(chunk,  data[:333])

        # test readline
        data = 'hello world\nhello world line 2\n'
        compressed = zlib.compress(data)
        fp = StringIO.StringIO(compressed)
        dfo = util.decompressStream(fp)
        line = dfo.readline()
        self.assertEqual(line, 'hello world\n')
        line = dfo.readline()
        self.assertEqual(line, 'hello world line 2\n')

        fp = StringIO.StringIO(compressed)
        dfo = util.decompressStream(fp)
        line = dfo.readline(5)
        self.assertEqual(line, 'hello')
        line = dfo.readline(5)
        self.assertEqual(line, ' worl')
        line = dfo.readline()
        self.assertEqual(line, 'd\n')
Example #14
def agregar_cumple():
    print "***Ingrese los datos del cumpleanios***"
    fecha = zlib.compress(encriptar(raw_input("Fecha: ")))
    nombre = zlib.compress(encriptar(raw_input("Nombre: ")))
    tipo = zlib.compress(encriptar(raw_input("tipo de contacto: ")))
    medio = zlib.compress(encriptar(raw_input("Medio de contacto: ")))
    try:
        f = open("calendario.txt", "r")  # open the calendar file in read mode
    except IOError:
        crear_calendario()
        print ("Se ha creado un nuevo calendario")
        f = open("calendario.txt","r")  # open the freshly created file
    finally:  # the finally clause runs whether or not an exception was raised
        agenda = pickle.load(f)  # load the pickled dictionary from the file
        if fecha in agenda.keys():  # keys() returns all keys used in the dictionary
            agenda[fecha].append((nombre, tipo, medio))  # append to this date's list

        else:  # no entry for this date yet, start a new list
            agenda[fecha] = [(nombre, tipo, medio)]
        f.close()  # close the file
        f = open("calendario.txt", "w")  # reopen the file in write mode
        pickle.dump(agenda, f)  # serialize the updated dictionary back to disk
        f.close()  # close the file
        print "Se ha agregado el cumple de " + zlib.decompress(desencriptar(nombre)) + " a la agenda"
        print " "
Example #15
def buscar_cumple():
    print "Ingrese 1 para buscar por nombre o 2 para buscar por fecha de cumpleanos (cualquier otro caracter para cancelar)"
    op = raw_input("Opcion:")
    if op == "1":
        try:
            f = open("calendario.txt","r")#Abre el archivo de modo lectura
        except:
            print "aun no ha ingresado ningun cumple!"
        else#El código colocado en la cláusula else se ejecuta solo si no se levante una excepción:
            print "Ingrese el nombre"
            nomb = zlib.compress(encriptar(raw_input("Nombre:")))
            dic = pickle.load(f)#la funcion load carga el objeto serializado, ya q este es una lista
            f.close()#cerrar el archivo
            encontrado = "no"
            for i in dic.keys():#devuelve una lista de todas las claves usadas en el diccionario
                for j in range(len(dic[i])):            
                    if nomb == dic[i][j][0]:
                        print ("Se encontro " + zlib.decompress(desencriptar(dic[i][j][0])) + " el dia " + zlib.decompress(desencriptar(i)))
                        encontrado = "si"
            if encontrado == "no":
                print "***No se hayaron coinsidencias***"
       
    elif op == "2":
        try:
            f = open("calendario.txt","r")#abre el archivo modo lectura
        except:
            print "aun no ha ingresado ningun cumple!"
        else#• El código colocado en la cláusula else se ejecuta solo si no se levante una excepción:
            print "Ingrese la fecha"
            fecha = zlib.compress(encriptar(raw_input("Fecha: ")))
            dic = pickle.load(f)#la funcion load carga el objeto serializado, ya q este es una lista
            f.close()#Cierra el archivo
            if fecha in dic:
                for x in dic[fecha]:
                    print zlib.decompress(desencriptar (x[0])) + ", " + zlib.decompress(desencriptar (x[1])) + ", " + zlib.decompress(desencriptar (x[2])) + "\n"
Example #16
  def testManyChunks(self):
    action = FakeAction()
    uploader = uploading.TransferStoreUploader(action, chunk_size=3)

    with test_lib.AutoTempFilePath() as temp_filepath:
      with open(temp_filepath, "w") as temp_file:
        temp_file.write("1234567890")

      blobdesc = uploader.UploadFilePath(temp_filepath)

      self.assertEqual(action.charged_bytes, 10)
      self.assertEqual(len(action.messages), 4)
      self.assertEqual(action.messages[0].item.data, zlib.compress("123"))
      self.assertEqual(action.messages[1].item.data, zlib.compress("456"))
      self.assertEqual(action.messages[2].item.data, zlib.compress("789"))
      self.assertEqual(action.messages[3].item.data, zlib.compress("0"))

      self.assertEqual(len(blobdesc.chunks), 4)
      self.assertEqual(blobdesc.chunk_size, 3)
      self.assertEqual(blobdesc.chunks[0].offset, 0)
      self.assertEqual(blobdesc.chunks[0].length, 3)
      self.assertEqual(blobdesc.chunks[0].digest, Sha256("123"))
      self.assertEqual(blobdesc.chunks[1].offset, 3)
      self.assertEqual(blobdesc.chunks[1].length, 3)
      self.assertEqual(blobdesc.chunks[1].digest, Sha256("456"))
      self.assertEqual(blobdesc.chunks[2].offset, 6)
      self.assertEqual(blobdesc.chunks[2].length, 3)
      self.assertEqual(blobdesc.chunks[2].digest, Sha256("789"))
      self.assertEqual(blobdesc.chunks[3].offset, 9)
      self.assertEqual(blobdesc.chunks[3].length, 1)
      self.assertEqual(blobdesc.chunks[3].digest, Sha256("0"))
Example #17
def do_chunk(ilines,infile,args):
  """Takes in a the lines from the index file to work on in array form,
     and the bam file name, and the arguments

     returns a list of the necessary data for chimera detection ready for sorting
  """
  ilines = [x.rstrip().split("\t") for x in ilines]
  coord = [int(x) for x in ilines[0][2:4]]
  bf = BAMFile(infile,BAMFile.Options(blockStart=coord[0],innerStart=coord[1]))
  results = []
  for i in range(0,len(ilines)):
    flag = int(ilines[i][5])
    e = bf.read_entry()
    #if not e: break
    value = None
    if e.is_aligned():
      tx = e.get_target_transcript(args.minimum_intron_size)
      value =  {'qrng':e.actual_original_query_range.get_range_string(),'tx':tx.get_gpd_line(),'flag':flag,'qlen':e.original_query_sequence_length,'aligned_bases':e.get_aligned_bases_count()}
      results.append(e.entries.qname+"\t"+base64.b64encode(
                                      zlib.compress(
                                       pickle.dumps(value))))
      #results.append([e.value('qname'),zlib.compress(pickle.dumps(value))])
    else:
      value =  {'qrng':'','tx':'','flag':flag,'qlen':e.original_query_sequence_length,'aligned_bases':0}
      results.append(e.entries.qname+"\t"+base64.b64encode(
                                      zlib.compress(
                                       pickle.dumps(value))))
      #results.append([e.value('qname'),zlib.compress(pickle.dumps(value))])
  return results
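Each result line above packs a pickled dict as base64-wrapped zlib data after the query name, so a downstream consumer reverses the pipeline step by step. A short Python 3 sketch of that decode (decode_result_line is illustrative, assuming the same tab-separated format):

import base64
import pickle
import zlib

def decode_result_line(line):
    # Split 'qname<TAB>payload' and reverse base64 -> zlib -> pickle.
    qname, payload = line.rstrip("\n").split("\t", 1)
    value = pickle.loads(zlib.decompress(base64.b64decode(payload)))
    return qname, value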
Example #18
def ncd(x, y):
    if x == y:
        return 0
    z_x = len(zlib.compress(x))
    z_y = len(zlib.compress(y))
    z_xy = len(zlib.compress(x + y))
    return float(z_xy - min(z_x, z_y)) / max(z_x, z_y)
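This normalized compression distance is 0 for identical inputs (short-circuited by the equality check) and approaches 1 for unrelated ones. For example, with byte strings in Python 3:

print(ncd(b"banana" * 50, b"banana" * 50))         # 0: identical inputs
print(ncd(b"banana" * 50, b"bananas" * 50))        # small: highly similar
print(ncd(b"banana" * 50, bytes(range(256)) * 2))  # closer to 1: unrelated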
Example #19
  def testCustomOffset(self):
    action = FakeAction()
    uploader = uploading.TransferStoreUploader(action, chunk_size=2)

    with test_lib.AutoTempFilePath() as temp_filepath:
      with open(temp_filepath, "w") as temp_file:
        temp_file.write("0123456")

      blobdesc = uploader.UploadFilePath(temp_filepath, offset=2)

      self.assertEqual(action.charged_bytes, 5)
      self.assertEqual(len(action.messages), 3)
      self.assertEqual(action.messages[0].item.data, zlib.compress("23"))
      self.assertEqual(action.messages[1].item.data, zlib.compress("45"))
      self.assertEqual(action.messages[2].item.data, zlib.compress("6"))

      self.assertEqual(len(blobdesc.chunks), 3)
      self.assertEqual(blobdesc.chunk_size, 2)
      self.assertEqual(blobdesc.chunks[0].offset, 2)
      self.assertEqual(blobdesc.chunks[0].length, 2)
      self.assertEqual(blobdesc.chunks[0].digest, Sha256("23"))
      self.assertEqual(blobdesc.chunks[1].offset, 4)
      self.assertEqual(blobdesc.chunks[1].length, 2)
      self.assertEqual(blobdesc.chunks[1].digest, Sha256("45"))
      self.assertEqual(blobdesc.chunks[2].offset, 6)
      self.assertEqual(blobdesc.chunks[2].length, 1)
      self.assertEqual(blobdesc.chunks[2].digest, Sha256("6"))
Example #20
 def __init__(self, value):
     json = dumps(value)
     avro = schema.dump_report(value)
     self._value = zlib.compress(avro)
     print "json: %i gzip-json: %i avro: %i gzip-avro: %i" % (
         len(json), len(zlib.compress(json)), len(avro), len(self._value))
     self.length = len(self._value)
Example #21
File: swftag.py Project: hadashiA/ppswf
        def fset(self, image):
            if isinstance(image, GIF):
                image = image.images[0]

            if image.with_pallete():
                self.__image = image

                self._body_bytes = struct.pack('<HBHHB',
                                               self.cid or 0,
                                               3,
                                               image.width,
                                               image.height,
                                               image.pallete_size - 1,
                                               )
                self._body_bytes += zlib.compress(
                    image.pallete_bytes + \
                    adjust_indices_bytes(image.build_indices(),
                                         image.width))
            else:
                self._body_bytes = struct.pack('<HBHH',
                                               self.cid or 0,
                                               5,
                                               image.width,
                                               image.height,
                                               )
                self._body_bytes += zlib.compress(image.build_xrgb())
Example #22
def create(url=None):
	if url is not None:
		return sys.argv[0]+"?hash="+zlib.compress(url).encode("hex")
	elif not sys.argv[2].startswith("?hash="):
		return sys.argv[0]+"?hash="+zlib.compress(sys.argv[2]).encode("hex")
	else:
		return sys.argv[0]+sys.argv[2]
Example #23
    def toEtree(self):
        msg = Element(('jabber:client', 'iq'))
        msg['type'] = self.type
        msg['id'] = self.id
        msg['from'] = self.from_
        msg['to'] = self.to

        if self.type == 'result':
            ecm_message = msg.addElement('ecm_message')
            ecm_message['version'] = str(AGENT_VERSION_PROTOCOL)
            ecm_message['core'] = str(AGENT_VERSION_CORE)
            ecm_message['command'] = self.command
            ecm_message['signature'] = self.signature

            result = ecm_message.addElement('result')
            result['retvalue'] = self.retvalue
            result['timed_out'] = self.timed_out
            result['partial'] = self.partial

            # compress out
            result.addElement('gzip_stdout').addContent(base64.b64encode(zlib.compress(self.stdout)))
            result.addElement('gzip_stderr').addContent(base64.b64encode(zlib.compress(self.stderr)))
            del ecm_message

        return msg
Example #24
def test_http():
    def gzip_compress(data):
        file_obj = io.BytesIO()
        gzip_file = gzip.GzipFile(fileobj=file_obj, mode='wb')
        gzip_file.write(data)
        gzip_file.close()
        return file_obj.getvalue()

    with http_server({
        '/gzip': lambda env: (
            (gzip_compress(b'<html test=ok>'), [('Content-Encoding', 'gzip')])
            if 'gzip' in env.get('HTTP_ACCEPT_ENCODING', '') else
            (b'<html test=accept-encoding-header-fail>', [])
        ),
        '/deflate': lambda env: (
            (zlib.compress(b'<html test=ok>'),
             [('Content-Encoding', 'deflate')])
            if 'deflate' in env.get('HTTP_ACCEPT_ENCODING', '') else
            (b'<html test=accept-encoding-header-fail>', [])
        ),
        '/raw-deflate': lambda env: (
            # Remove zlib header and checksum
            (zlib.compress(b'<html test=ok>')[2:-4],
             [('Content-Encoding', 'deflate')])
            if 'deflate' in env.get('HTTP_ACCEPT_ENCODING', '') else
            (b'<html test=accept-encoding-header-fail>', [])
        ),
    }) as root_url:
        assert HTML(root_url + '/gzip').etree_element.get('test') == 'ok'
        assert HTML(root_url + '/deflate').etree_element.get('test') == 'ok'
        assert HTML(
            root_url + '/raw-deflate').etree_element.get('test') == 'ok'
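The /raw-deflate case works because a zlib stream is just a 2-byte header and a 4-byte Adler-32 trailer wrapped around raw DEFLATE data, and some servers ship the raw form under Content-Encoding: deflate. To inflate such data directly, zlib.decompress accepts a negative window size:

import zlib

zlib_stream = zlib.compress(b'<html test=ok>')
raw_deflate = zlib_stream[2:-4]  # strip the zlib header and Adler-32 checksum

# wbits=-15 tells zlib to expect a headerless (raw) DEFLATE stream
assert zlib.decompress(raw_deflate, -15) == b'<html test=ok>'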
Example #25
def hide(img, img_enc, copyright="http://bitbucket.org/cedricbonhomme/stegano", secret_message=None, secret_file=None):
    """
    """
    import shutil
    import datetime
    from zlib import compress
    from zlib import decompress
    from base64 import b64encode
    from .exif.minimal_exif_writer import MinimalExifWriter

    if secret_file != None:
        with open(secret_file, "r") as f:
            secret_file_content = f.read()
    text = "\nImage annotation date: "
    text = text + str(datetime.date.today())
    text = text + "\nImage description:\n"
    if secret_file != None:
        text = compress(b64encode(text + secret_file_content))
    else:
        text = compress(b64encode(text + secret_message))

    try:
        shutil.copy(img, img_enc)
    except Exception as e:
        print(("Impossible to copy image:", e))
        return

    f = MinimalExifWriter(img_enc)
    f.removeExif()
    f.newImageDescription(text)
    f.newCopyright(copyright, addYear=1)
    f.process()
Example #26
def save_signature(fname, _id):
    # Get the current date and time.
    ret_date = k2timelib.get_now_date()
    ret_time = k2timelib.get_now_time()

    # Pack the date and time values into 2 bytes each.
    val_date = struct.pack('<H', ret_date)
    val_time = struct.pack('<H', ret_time)

    # Save the size signature file, e.g. script.s01
    sname = '%s.s%02d' % (fname, _id)
    t = zlib.compress(marshal.dumps(set(size_sig)))  # deduplicate before saving
    t = 'KAVS' + struct.pack('<L', len(size_sig)) + val_date + val_time + t
    save_file(sname, t)

    # Save the p1 pattern file, e.g. script.i01
    sname = '%s.i%02d' % (fname, _id)
    t = zlib.compress(marshal.dumps(p1_sig))
    t = 'KAVS' + struct.pack('<L', len(p1_sig)) + val_date + val_time + t
    save_file(sname, t)

    # Save the p2 pattern file, e.g. script.c01
    sname = '%s.c%02d' % (fname, _id)
    t = zlib.compress(marshal.dumps(p2_sig))
    t = 'KAVS' + struct.pack('<L', len(p2_sig)) + val_date + val_time + t
    save_file(sname, t)

    # Save the malware name file, e.g. script.n01
    sname = '%s.n%02d' % (fname, _id)
    t = zlib.compress(marshal.dumps(name_sig))
    t = 'KAVS' + struct.pack('<L', len(name_sig)) + val_date + val_time + t
    save_file(sname, t)
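Reading one of these signature files back just reverses the framing: check the 4-byte 'KAVS' magic, read the record count, skip the packed date and time, and unmarshal the decompressed tail. A hedged Python 3 sketch (load_signature is illustrative, not part of the original module):

import marshal
import struct
import zlib

def load_signature(sname):
    with open(sname, 'rb') as f:
        blob = f.read()
    assert blob[:4] == b'KAVS'                 # magic
    count = struct.unpack('<L', blob[4:8])[0]  # record count
    # blob[8:10] and blob[10:12] hold the packed date and time
    sig = marshal.loads(zlib.decompress(blob[12:]))
    return count, sig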
Example #27
    def process_response(self, request, response):
        """Sets the cache, if needed."""
        #if not self._should_update_cache(request, response):
        #    # We don't need to update the cache, just return.
        #    return response

        if response.streaming or response.status_code != 200:
            return response
        
        # Don't cache responses that set a user-specific (and maybe security
        # sensitive) cookie in response to a cookie-less request.
        if not request.COOKIES and response.cookies and has_vary_header(response, 'Cookie'):
            return response

        # Try to get the timeout from the "max-age" section of the "Cache-
        # Control" header before reverting to using the default cache_timeout
        # length.
        timeout = get_max_age(response)
        if timeout is None:
            timeout = self.cache_timeout
        elif timeout == 0:
            # max-age was set to 0, don't bother caching.
            return response
        patch_response_headers(response, timeout)
        if timeout:
            cache_key = "%s-%s" % (self.key_prefix, request.get_full_path())
            #raise ValueError(cache_key)
            if hasattr(response, 'render') and isinstance(response.render, collections.Callable):
                response.add_post_render_callback(
                    lambda r: cache._cache.set(cache_key.encode("utf-8"), zlib.compress(r.content, 9), timeout)
                )
            else:
                # we use the highest compression level, because since it is cached we hope for it to pay off
                cache._cache.set(cache_key.encode("utf-8"), zlib.compress(response.content, 9), timeout)
        return response
Example #28
File: core.py Project: sebix/acrylamid
    def set(self, path, key, value):
        """Save a key, value pair into a blob using pickle and moderate zlib
        compression (level 6). We simply save a dictionary containing all
        different intermediates (from every view) of an entry.

        :param path: path of this cache object
        :param key: dictionary key where we store the value
        :param value: a string we compress with zlib and afterwards save
        """
        if exists(path):
            try:
                with io.open(path, 'rb') as fp:
                    rv = pickle.load(fp)
            except (pickle.PickleError, IOError):
                cache.remove(path)
                rv = {}
            try:
                with io.open(path, 'wb') as fp:
                    rv[key] = zlib.compress(value, 6)
                    pickle.dump(rv, fp, pickle.HIGHEST_PROTOCOL)
            except (IOError, pickle.PickleError) as e:
                log.warn('%s: %s' % (e.__class__.__name__, e))
        else:
            try:
                fd, tmp = tempfile.mkstemp(suffix=self._fs_transaction_suffix,
                                           dir=self.cache_dir)
                with io.open(fd, 'wb') as fp:
                    pickle.dump({key: zlib.compress(value, 6)}, fp, pickle.HIGHEST_PROTOCOL)
                os.rename(tmp, path)
                os.chmod(path, self.mode)
            except (IOError, OSError, pickle.PickleError, zlib.error) as e:
                log.warn('%s: %s' % (e.__class__.__name__, e))

        self.objects[path].add(key)
        return value
Example #29
    def test_multi_decoding_deflate_deflate(self):
        data = zlib.compress(zlib.compress(b'foo'))

        fp = BytesIO(data)
        r = HTTPResponse(fp, headers={'content-encoding': 'deflate, deflate'})

        assert r.data == b'foo'
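What urllib3 does with the 'deflate, deflate' header is apply the listed encodings' inverses in order; done by hand, that is just two nested zlib.decompress calls:

import zlib

data = zlib.compress(zlib.compress(b'foo'))
assert zlib.decompress(zlib.decompress(data)) == b'foo'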
Example #30
  def respond(self, result):
    print 'server.respond', type(result), result

    if isinstance(result, failure.Failure):
      print 'server.respond to failure', result
      self.command['results'] = result.getTraceback()
      self.sendString('log.' + pickle.dumps(self.command))

    elif isinstance(result, tuple):
      print 'server.browseResults are tuple'
      self.command['results'] = result[0]
      outfileName, errfileName = result[1]

      self.command['results'] = zlib.compress(open(outfileName, 'rb').read())
      self.sendString('out.' + pickle.dumps(self.command))
      os.remove(outfileName)

      self.command['results'] = zlib.compress(open(errfileName, 'rb').read())
      self.sendString('err.' + pickle.dumps(self.command))
      os.remove(errfileName)

    else:
      print 'server.Unknown Response', result
      self.command['results'] = result
      self.sendString('log.' + pickle.dumps(self.command))

    self.transport.loseConnection()
Example #31
    def save(self, file: Union[str, BinaryIO]=None) -> bytes:
        """
        Returns the region as bytes with
        the anvil file format structure,
        aka the final ``.mca`` file.

        Parameters
        ----------
        file
            Either a path or a file object, if given region
            will be saved there.
        """
        # Store all the chunks data as zlib compressed nbt data
        chunks_data = []
        for chunk in self.chunks:
            if chunk is None:
                chunks_data.append(None)
                continue
            chunk_data = BytesIO()
            chunk.save().write_file(buffer=chunk_data)
            chunk_data.seek(0)
            chunk_data = zlib.compress(chunk_data.read())
            chunks_data.append(chunk_data)

        # This is what is added after the location and timestamp header
        chunks_bytes = bytes()
        offsets = []
        for chunk in chunks_data:
            if chunk is None:
                offsets.append(None)
                continue
            # 4 bytes for the length (which counts the compression-type byte),
            # then b'\x02' for the compression type, 2 meaning zlib
            to_add = (len(chunk)+1).to_bytes(4, 'big') + b'\x02' + chunk

            # offset in 4KiB sectors
            sector_offset = len(chunks_bytes) // 4096
            sector_count = math.ceil(len(to_add) / 4096)
            offsets.append((sector_offset, sector_count))

            # Padding to be a multiple of 4KiB long
            to_add += bytes(4096 - (len(to_add) % 4096))
            chunks_bytes += to_add

        locations_header = bytes()
        for offset in offsets:
            # None means the chunk is not an actual chunk in the region
            # and will be 4 null bytes, which represents non-generated chunks to minecraft
            if offset is None:
                locations_header += bytes(4)
            else:
                # offset is (sector offset, sector count)
                locations_header += (offset[0] + 2).to_bytes(3, 'big') + offset[1].to_bytes(1, 'big')

        # Set them all as 0
        timestamps_header = bytes(4096)

        final = locations_header + timestamps_header + chunks_bytes

        # Pad file to be a multiple of 4KiB in size
        # as Minecraft only accepts region files that are like that
        final += bytes(4096 - (len(final) % 4096))
        assert len(final) % 4096 == 0 # just in case

        # Save to a file if it was given
        if file:
            if isinstance(file, str):
                with open(file, 'wb') as f:
                    f.write(final)
            else:
                file.write(final)
        return final
Example #32
File: mbutils.py Project: nurenda/QTiles
def disk_to_mbtiles(directory_path, mbtiles_file, **kwargs):

    silent = kwargs.get('silent')

    if not silent:
        logger.info("Importing disk to MBTiles")
        logger.debug("%s --> %s" % (directory_path, mbtiles_file))

    con = mbtiles_connect(mbtiles_file, silent)
    cur = con.cursor()
    optimize_connection(cur)
    mbtiles_setup(cur)
    #~ image_format = 'png'
    image_format = kwargs.get('format', 'png')

    try:
        metadata = json.load(
            open(os.path.join(directory_path, 'metadata.json'), 'r'))
        image_format = kwargs.get('format')
        for name, value in metadata.items():
            cur.execute('insert into metadata (name, value) values (?, ?)',
                        (name, value))
        if not silent:
            logger.info('metadata from metadata.json restored')
    except IOError:
        if not silent:
            logger.warning('metadata.json not found')

    count = 0
    start_time = time.time()

    for zoom_dir in get_dirs(directory_path):
        if kwargs.get("scheme") == 'ags':
            if not "L" in zoom_dir:
                if not silent:
                    logger.warning(
                        "You appear to be using an ags scheme on an non-arcgis Server cache."
                    )
            z = int(zoom_dir.replace("L", ""))
        elif kwargs.get("scheme") == 'gwc':
            z = int(zoom_dir[-2:])
        else:
            if "L" in zoom_dir:
                if not silent:
                    logger.warning(
                        "You appear to be using a %s scheme on an arcgis Server cache. Try using --scheme=ags instead"
                        % kwargs.get("scheme"))
            z = int(zoom_dir)
        for row_dir in get_dirs(os.path.join(directory_path, zoom_dir)):
            if kwargs.get("scheme") == 'ags':
                y = flip_y(z, int(row_dir.replace("R", ""), 16))
            elif kwargs.get("scheme") == 'gwc':
                pass
            elif kwargs.get("scheme") == 'zyx':
                y = flip_y(int(z), int(row_dir))
            else:
                x = int(row_dir)
            for current_file in os.listdir(
                    os.path.join(directory_path, zoom_dir, row_dir)):
                if current_file == ".DS_Store" and not silent:
                    logger.warning(
                        "Your OS is MacOS,and the .DS_Store file will be ignored."
                    )
                else:
                    file_name, ext = current_file.split('.', 1)
                    f = open(
                        os.path.join(directory_path, zoom_dir, row_dir,
                                     current_file), 'rb')
                    file_content = f.read()
                    f.close()
                    if kwargs.get('scheme') == 'xyz':
                        y = flip_y(int(z), int(file_name))
                    elif kwargs.get("scheme") == 'ags':
                        x = int(file_name.replace("C", ""), 16)
                    elif kwargs.get("scheme") == 'gwc':
                        x, y = file_name.split('_')
                        x = int(x)
                        y = int(y)
                    elif kwargs.get("scheme") == 'zyx':
                        x = int(file_name)
                    else:
                        y = int(file_name)

                    if (ext == image_format):
                        if not silent:
                            logger.debug(
                                ' Read tile from Zoom (z): %i\tCol (x): %i\tRow (y): %i'
                                % (z, x, y))
                        cur.execute(
                            """insert into tiles (zoom_level,
                            tile_column, tile_row, tile_data) values
                            (?, ?, ?, ?);""",
                            (z, x, y, sqlite3.Binary(file_content)))
                        count = count + 1
                        if (count % 100) == 0 and not silent:
                            logger.info(" %s tiles inserted (%d tiles/sec)" %
                                        (count, count /
                                         (time.time() - start_time)))
                    elif (ext == 'grid.json'):
                        if not silent:
                            logger.debug(
                                ' Read grid from Zoom (z): %i\tCol (x): %i\tRow (y): %i'
                                % (z, x, y))
                        # Remove potential callback with regex
                        file_content = file_content.decode('utf-8')
                        has_callback = re.match(
                            r'[\w\s=+-/]+\(({(.|\n)*})\);?', file_content)
                        if has_callback:
                            file_content = has_callback.group(1)
                        utfgrid = json.loads(file_content)

                        data = utfgrid.pop('data')
                        compressed = zlib.compress(
                            json.dumps(utfgrid).encode())
                        cur.execute(
                            """insert into grids (zoom_level, tile_column, tile_row, grid) values (?, ?, ?, ?) """,
                            (z, x, y, sqlite3.Binary(compressed)))
                        grid_keys = [k for k in utfgrid['keys'] if k != ""]
                        for key_name in grid_keys:
                            key_json = data[key_name]
                            cur.execute(
                                """insert into grid_data (zoom_level, tile_column, tile_row, key_name, key_json) values (?, ?, ?, ?, ?);""",
                                (z, x, y, key_name, json.dumps(key_json)))

    if not silent:
        logger.debug('tiles (and grids) inserted.')

    if kwargs.get('compression', False):
        compression_prepare(cur, silent)
        compression_do(cur, con, 256, silent)
        compression_finalize(cur, con, silent)

    optimize_database(con, silent)
Example #33
 def send_zipped_pickle(self, obj, flags=0, protocol=-1):
     """pack and compress an object with pickle and zlib."""
     pobj = pickle.dumps(obj, protocol)
     zobj = zlib.compress(pobj)
     print('zipped pickle is %i bytes' % len(zobj))
     return self.send(zobj, flags=flags)
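The matching receive side reverses the two steps, a pattern the pyzmq docs pair with a sender like the one above; a sketch written as a plain function over a socket-like object:

import pickle
import zlib

def recv_zipped_pickle(sock, flags=0):
    # Inverse of send_zipped_pickle: receive, decompress, unpickle.
    zobj = sock.recv(flags)
    return pickle.loads(zlib.decompress(zobj))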
Example #34
def JXwrite(data, fileName):
    from json import dumps
    from lzma import compress
    with open(fileName, 'wb') as f:
        f.write(compress(bytes(dumps(data), encoding="ascii"), preset=6))
Example #35
def JZwrite(data, fileName):
    from json import dumps
    from zlib import compress
    with open(fileName, 'wb') as f: 
        f.write(compress(bytes(dumps(data), encoding="ascii"), 3))
Example #36
def Zwrite(data, fileName):
    from zlib import compress
    with open(fileName, 'wb') as f: 
        f.write(compress(data, 1))
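A matching reader for Zwrite just reverses the pipeline (Zread is illustrative, not from the original; the JXwrite/JZwrite counterparts would additionally json-decode the decompressed text):

def Zread(fileName):
    from zlib import decompress
    with open(fileName, 'rb') as f:
        return decompress(f.read())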
Example #37
File: ITA.py Project: zlu27/pySPM
    def add_new_images(self,
                       miblock,
                       scans=None,
                       added=None,
                       prog=False,
                       **kargs):
        # Compatibility with old parameter names
        if 'Scans' in kargs:
            scans = kargs.pop("Scans")
        if 'Added' in kargs:
            added = kargs.pop("Added")

        assert scans is not None or added is not None
        lvl = 3  # zlib encoding level
        sy, sx = self.size['pixels']['y'], self.size['pixels']['x']
        SN = miblock.goto("SN").get_string()
        if added is None:
            added_img = np.zeros((sy, sx), dtype=np.uint32)
        chID = miblock.goto("id").get_ulong()
        if scans is not None:
            N = self.root.goto(
                "filterdata/TofCorrection/ImageStack/Reduced Data/ImageStackScans/Image.NumberOfImages"
            ).get_ulong()
        AN = self.root.goto(
            "filterdata/TofCorrection/ImageStack/Reduced Data/ImageStackScansAdded/Image.NumberOfImages"
        ).get_ulong()
        self.root.edit_block(
            "filterdata/TofCorrection/ImageStack/Reduced Data/ImageStackScansAdded/Image[{}]"
            .format(AN), "Image.MassIntervalSN", SN.encode('utf8'))
        self.root.edit_block(
            "filterdata/TofCorrection/ImageStack/Reduced Data/ImageStackScansAdded/Image[{}]"
            .format(AN), "Image.XSize", struct.pack("<I", sx))
        self.root.edit_block(
            "filterdata/TofCorrection/ImageStack/Reduced Data/ImageStackScansAdded/Image[{}]"
            .format(AN), "Image.YSize", struct.pack("<I", sy))
        if scans is not None:
            RS = range(self.Nscan)
            if prog:
                RS = PB(RS)
            for i in RS:
                img = np.flipud(scans[i].astype(np.uint32, casting='unsafe'))
                data = zlib.compress(struct.pack("<{}I".format(sx * sy),
                                                 *np.ravel(img)),
                                     level=lvl)
                self.root.edit_block(
                    "filterdata/TofCorrection/ImageStack/Reduced Data/ImageStackScans/Image[{}]"
                    .format(N),
                    "ImageArray.Long",
                    data,
                    id=i,
                    _type=128)
                if added is None:
                    added_img += img

        if added is None:
            added = added_img
        else:
            added = np.flipud(added)
        data = zlib.compress(struct.pack(
            "<{}I".format(sx * sy),
            *np.ravel(added.astype(np.uint32, casting='unsafe'))),
                             level=lvl)
        self.root.edit_block(
            "filterdata/TofCorrection/ImageStack/Reduced Data/ImageStackScansAdded/Image[{}]"
            .format(AN),
            "ImageArray.Long",
            data,
            _type=128)

        self.root.edit_block(
            "filterdata/TofCorrection/ImageStack/Reduced Data/ImageStackScansAdded/Image[{}]"
            .format(AN), "Image.PulsesPerPixel",
            struct.pack("<I", self.spp * self.Nscan))
        self.root.edit_block(
            "filterdata/TofCorrection/ImageStack/Reduced Data/ImageStackScansAdded/Image[{}]"
            .format(AN), "Image.MaxCountsPerPixel",
            struct.pack("<I", int(np.max(added))))
        self.root.edit_block(
            "filterdata/TofCorrection/ImageStack/Reduced Data/ImageStackScansAdded/Image[{}]"
            .format(AN), "Image.MinCountsPerPixel",
            struct.pack("<I", int(np.min(added))))
        self.root.edit_block(
            "filterdata/TofCorrection/ImageStack/Reduced Data/ImageStackScansAdded/Image[{}]"
            .format(AN), "Image.TotalCountsDbl",
            struct.pack("<d", np.sum(added)))
        self.root.edit_block(
            "filterdata/TofCorrection/ImageStack/Reduced Data/ImageStackScansAdded/Image[{}]"
            .format(AN), "Image.TotalCounts",
            struct.pack("<I", int(np.sum(added))))

        if scans is not None:
            self.root.edit_block(
                "filterdata/TofCorrection/ImageStack/Reduced Data/ImageStackScans",
                "Image.NumberOfImages", struct.pack("<I", N + 1))
        self.root.edit_block(
            "filterdata/TofCorrection/ImageStack/Reduced Data/ImageStackScansAdded",
            "Image.NumberOfImages", struct.pack("<I", AN + 1))
        self.Nimg += 1
Example #38
def compress_bytes_to_string_b64zlib(data: bytes) -> str:
    return b64encode(zlib.compress(data)).decode('ascii')
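The inverse transform (illustrative, not from the original source) decodes the base64 text back to bytes and inflates it:

from base64 import b64decode
import zlib

def decompress_string_to_bytes_b64zlib(data: str) -> bytes:
    return zlib.decompress(b64decode(data.encode('ascii')))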
Example #39
#!/usr/bin/env python3
# encoding: utf-8
#
# Copyright (c) 2008 Doug Hellmann All rights reserved.
#
"""
"""
#end_pymotw_header

import zlib

input_data = b'Some repeated text.\n' * 1024
template = '{:>5}  {:>5}'

print(template.format('Level', 'Size'))
print(template.format('-----', '----'))

for i in range(0, 10):
    data = zlib.compress(input_data, i)
    print(template.format(i, len(data)))
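Two details worth knowing when reading the table this prints: level 0 stores the data uncompressed (plus a few bytes of framing, so its "compressed" size slightly exceeds the input), and omitting the level passes -1, which current zlib builds map to level 6:

import zlib

input_data = b'Some repeated text.\n' * 1024
assert len(zlib.compress(input_data, 0)) > len(input_data)         # stored, not compressed
assert zlib.compress(input_data) == zlib.compress(input_data, -1)  # -1 is the default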
Example #40
 def get_broks(self, bname):
     res = self.app.get_broks()
     return base64.b64encode(zlib.compress(cPickle.dumps(res), 2))
Example #41
    def run(self):

        #test_str = '"cmd" : {"type" : "arp", "id" : "patient_X2", "reply-to" : "amqp://'+self.context.getUuid()+'"}'
        #test_str = None
        cmd_str = None

        while True:

            cmd_str = None

            if not self.context.getProcGraph().isRegistryModified():

                data = self.context.getProcGraph().dumpExternalRegistry()

                try:
                    cmd_str = self.cmd.get(False)
                except Queue.Empty as e:
                    pass

                if cmd_str:
                    if len(data) > 5:
                        data = data[:-1]
                        data = data + "," + cmd_str + "}"
                    else:
                        data = "{" + cmd_str + "}"
                else:
                    time.sleep(self.context.get_broadcast_rate())

                if self.context.is_with_nocompress():
                    dataz = data
                else:
                    dataz = zlib.compress(data)

                if self.connection.is_closed:
                    self._connect()

                self.channel.basic_publish(exchange=self.exchange,
                                           routing_key='',
                                           body=dataz)

                if self.stopped():
                    logging.debug("Exiting thread: " + self.__class__.__name__)
                    break
                else:
                    continue

            if self.context.getProcGraph().isRegistryModified():
                data = self.context.getProcGraph().dumpExternalRegistry()

                # if cmd_str:
                #     if len(data) > 5:
                #         data = data[:-1]
                #         data = data+","+cmd_str+"}"
                #     else:
                #         data = "{"+cmd_str+"}"

                #if self.context.is_with_nocompress():
                #    dataz = data
                #else:
                dataz = zlib.compress(data)
                self.context.getProcGraph().ackRegistryUpdate()

                if self.connection.is_closed:
                    self._connect()

                self.channel.basic_publish(exchange=self.exchange,
                                           routing_key='',
                                           body=dataz)

                if self.stopped():
                    logging.debug("Exiting thread: " + self.__class__.__name__)
                    break
                else:
                    continue
Example #42
 def compress(self):
     """
         Convenience method for compressing the json output.
     """
     return compress(self.json())
Example #43
def compressSysMessage(message):
    if isinstance(message, dict):
        message = zlib.compress(cPickle.dumps(message, -1), 1)
    return message
Example #44
f = open("D:\\mywork\\pycharm_workspace\\FirstPython\\temp\\cuit.html",
         "w",
         encoding='UTF-8')
for line in urlopen('http://www.cuit.edu.cn/'):
    line = line.decode('UTF-8')
    f.write(line + os.linesep)

# Dates and times
now = date.today()
print(now)
strNow = now.strftime("%m-%d-%y. %d %b %Y is a %A on the %d day of %B.")
print(strNow)

birthday = date(1994, 10, 20)
age = now - birthday
print(age.days)

# Data compression
s = b'witch which has which witches wrist watch'
print(len(s))
t = zlib.compress(s)
print(len(t))
print(zlib.decompress(t))
print(zlib.crc32(s))

# Performance measurement
lt1 = Timer('t=a; a=b; b=t', 'a=1;b=2').timeit()
print(lt1)
lt2 = Timer('a,b = b, a', 'a=1; b=2').timeit()
print(lt2)
Example #45
 def compress(self, data: bytes):
     assert type(data) is bytes, "data has to be given as bytes"
     return zlib.compress(data, level=9)
Example #46
File: talk.py Project: Alvin-22/Eagle-Eyes
def Talk(encoding, path, user):
	try:
		global talk_settings

		CHUNK = 81920
		FORMAT = pyaudio.paInt16
		CHANNELS = 2
		RATE = 44100
		headersize = 10
		new_msg = True
		msg_len = 0
		full_msg = b''
		frames = []

		e = Encryption()
		client, addr = s.accept()

		u = (user, client, addr)
		talk_settings.append(u)

		p = pyaudio.PyAudio()
		stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE, input=True, output=False, frames_per_buffer=CHUNK)

		msg = pickle.dumps(stream.read(CHUNK))
		msg = zlib.compress(msg, 1)
		msg = e.do_encrypt(msg)
		final_msg = bytes(f'{len(msg):<{headersize}}', encoding) + msg
		client.send(final_msg)

		while True:
			try:
				client_msg = client.recv(1024)

				if new_msg:
					msg_len = int(client_msg[:headersize])
					new_msg = False

				full_msg += client_msg

				if len(full_msg)-headersize == msg_len:
					data = stream.read(CHUNK)
					frames.append(data)

					msg = pickle.dumps(data)
					msg = zlib.compress(msg, 1)
					msg = e.do_encrypt(msg)
					final_msg = bytes(f'{len(msg):<{headersize}}', encoding) + msg
					client.send(final_msg)
					
					new_msg = True
					msg_len = 0
					full_msg = b''
			except:
				talk_settings.remove(u)
				waveFile = wave.open(f'{path}/{time.strftime("%Y-%m-%d (%H-%M-%S)")}.wav', 'wb')
				waveFile.setnchannels(CHANNELS)
				waveFile.setsampwidth(p.get_sample_size(FORMAT))
				waveFile.setframerate(RATE)
				waveFile.writeframes(b''.join(frames))
				waveFile.close()
				exit(0)
	except:
		exit(0)
Example #47
def addCheckerRun(session, command, name, tag, username,
                  run_history_time, version, force):
    """
    Store checker run related data to the database.
    By default updates the results if name already exists.
    Using the force flag removes existing analysis results for a run.
    """
    try:
        LOG.debug("adding checker run")

        run = session.query(Run).filter(Run.name == name).one_or_none()

        if run and force:
            # Clean already collected results.
            if not run.can_delete:
                # Deletion is already in progress.
                msg = "Can't delete " + str(run.id)
                LOG.debug(msg)
                raise shared.ttypes.RequestFailed(
                    shared.ttypes.ErrorCode.DATABASE,
                    msg)

            LOG.info('Removing previous analysis results ...')
            session.delete(run)

            checker_run = Run(name, version, command)
            session.add(checker_run)
            session.flush()
            run_id = checker_run.id

        elif run:
            # There is already a run, update the results.
            run.date = datetime.now()
            run.command = command
            run.duration = -1
            session.flush()
            run_id = run.id
        else:
            # There is no run create new.
            checker_run = Run(name, version, command)
            session.add(checker_run)
            session.flush()
            run_id = checker_run.id

        # Add run to the history.
        LOG.debug("adding run to the history")

        if tag is not None:
            run_history = session.query(RunHistory) \
                .filter(RunHistory.run_id == run_id,
                        RunHistory.version_tag == tag) \
                .one_or_none()

            if run_history:
                run_history.version_tag = None
                session.add(run_history)

        compressed_command = zlib.compress(command,
                                           zlib.Z_BEST_COMPRESSION)
        run_history = RunHistory(run_id, tag, username, run_history_time,
                                 compressed_command)
        session.add(run_history)

        session.flush()
        return run_id
    except Exception as ex:
        raise shared.ttypes.RequestFailed(
            shared.ttypes.ErrorCode.GENERAL,
            str(ex))
Example #48
def make_char_data_file():
    """
    Build the compressed data file 'char_classes.dat' and write it to the
    current directory.

    If you run this, run it in Python 3.3 or later. It will run in earlier
    versions, but you won't get the current Unicode standard, leading to
    inconsistent behavior.
    """
    cclasses = [None] * 0x110000
    for codepoint in range(0x0, 0x110000):
        char = chr(codepoint)  # chr() handles the full Unicode range on Python 3
        category = unicodedata.category(char)

        if category.startswith('L'):  # letters
            if unicodedata.name(char).startswith('LATIN')\
            and codepoint < 0x200:
                if category == 'Lu':
                    cclasses[codepoint] = 'L'
                else:
                    cclasses[codepoint] = 'l'
            else: # non-Latin letter, or close enough
                if category == 'Lu' or category == 'Lt':
                    cclasses[codepoint] = 'A'
                elif category == 'Ll':
                    cclasses[codepoint] = 'a'
                elif category == 'Lo':
                    cclasses[codepoint] = 'C'
                elif category == 'Lm':
                    cclasses[codepoint] = 'm'
                else:
                    raise ValueError('got some weird kind of letter')
        elif category.startswith('M'):  # marks
            cclasses[codepoint] = 'M'
        elif category == 'No':
            cclasses[codepoint] = 'N'
        elif category == 'Sm':
            cclasses[codepoint] = '0'
        elif category == 'Sc':
            cclasses[codepoint] = '1'
        elif category == 'Sk':
            cclasses[codepoint] = '2'
        elif category == 'So':
            cclasses[codepoint] = '3'
        elif category == 'Cn':
            cclasses[codepoint] = '_'
        elif category == 'Cc':
            cclasses[codepoint] = 'X'
        elif category == 'Cs':
            cclasses[codepoint] = 'S'
        elif category == 'Co':
            cclasses[codepoint] = 'P'
        elif category.startswith('Z'):
            cclasses[codepoint] = ' '
        else:
            cclasses[codepoint] = 'o'

    cclasses[9] = cclasses[10] = cclasses[12] = cclasses[13] = ' '
    out = open('char_classes.dat', 'wb')
    out.write(zlib.compress(''.join(cclasses).encode('ascii')))
    out.close()
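Reading the table back is the mirror image; a minimal sketch, assuming char_classes.dat was written by the function above:

import zlib

# One ASCII class character per Unicode codepoint, indexed by codepoint.
with open('char_classes.dat', 'rb') as f:
    char_classes = zlib.decompress(f.read()).decode('ascii')

print(char_classes[ord('A')])  # 'L': Latin uppercase letter below U+0200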
Example #49
def stringify_message_dict(message_dict: Dict[str, Any]) -> bytes:
    return zlib.compress(orjson.dumps(message_dict))
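For illustration, the inverse (an assumed helper, not part of the original snippet) decompresses and parses with the same libraries:

import zlib
from typing import Any, Dict

import orjson

def dictify_message_bytes(data: bytes) -> Dict[str, Any]:
    # Reverse the pipeline: zlib-decompress, then parse the JSON bytes.
    return orjson.loads(zlib.decompress(data))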
Example #50
    def close(self):
        """All tables must have been written to disk. Now write the
		directory.
		"""
        tables = sorted(self.tables.items())
        if len(tables) != self.numTables:
            from fontTools import ttLib
            raise ttLib.TTLibError(
                "wrong number of tables; expected %d, found %d" %
                (self.numTables, len(tables)))

        if self.flavor == "woff":
            self.signature = b"wOFF"
            self.reserved = 0

            self.totalSfntSize = 12
            self.totalSfntSize += 16 * len(tables)
            for tag, entry in tables:
                self.totalSfntSize += (entry.origLength + 3) & ~3

            data = self.flavorData if self.flavorData else WOFFFlavorData()
            if data.majorVersion is not None and data.minorVersion is not None:
                self.majorVersion = data.majorVersion
                self.minorVersion = data.minorVersion
            else:
                if hasattr(self, 'headTable'):
                    self.majorVersion, self.minorVersion = struct.unpack(
                        ">HH", self.headTable[4:8])
                else:
                    self.majorVersion = self.minorVersion = 0
            if data.metaData:
                self.metaOrigLength = len(data.metaData)
                self.file.seek(0, 2)
                self.metaOffset = self.file.tell()
                import zlib
                compressedMetaData = zlib.compress(data.metaData)
                self.metaLength = len(compressedMetaData)
                self.file.write(compressedMetaData)
            else:
                self.metaOffset = self.metaLength = self.metaOrigLength = 0
            if data.privData:
                self.file.seek(0, 2)
                off = self.file.tell()
                paddedOff = (off + 3) & ~3
                self.file.write(b'\0' * (paddedOff - off))  # pad private data to a 4-byte boundary
                self.privOffset = self.file.tell()
                self.privLength = len(data.privData)
                self.file.write(data.privData)
            else:
                self.privOffset = self.privLength = 0

            self.file.seek(0, 2)
            self.length = self.file.tell()

        else:
            assert not self.flavor, "Unknown flavor '%s'" % self.flavor
            pass

        directory = sstruct.pack(self.directoryFormat, self)

        self.file.seek(self.directorySize)
        seenHead = 0
        for tag, entry in tables:
            if tag == "head":
                seenHead = 1
            directory = directory + entry.toString()
        if seenHead:
            self.writeMasterChecksum(directory)
        self.file.seek(0)
        self.file.write(directory)
Example #51
    def _submit(self,
                run,
                limits,
                cpu_model,
                required_files,
                result_files_patterns,
                meta_information,
                priority,
                user_pwd,
                svn_branch,
                svn_revision,
                counter=0):

        params = []
        opened_files = []  # open file handles are passed to the request library

        for programPath in run.sourcefiles:
            norm_path = self._normalize_path_for_cloud(programPath)
            params.append(
                ('programTextHash', (norm_path,
                                     self._get_sha256_hash(programPath))))

        for required_file in required_files:
            norm_path = self._normalize_path_for_cloud(required_file)
            params.append(
                ('requiredFileHash', (norm_path,
                                      self._get_sha256_hash(required_file))))

        params.append(('svnBranch', svn_branch or self._svn_branch))
        params.append(('revision', svn_revision or self._svn_revision))

        if run.propertyfile:
            file = self._add_file_to_params(params, 'propertyText',
                                            run.propertyfile)
            opened_files.append(file)

        if MEMLIMIT in limits:
            params.append(('memoryLimitation', str(limits[MEMLIMIT])))
        if TIMELIMIT in limits:
            params.append(('timeLimitation', str(limits[TIMELIMIT])))
        if SOFTTIMELIMIT in limits:
            params.append(('softTimeLimitation', str(limits[SOFTTIMELIMIT])))
        if CORELIMIT in limits:
            params.append(('coreLimitation', str(limits[CORELIMIT])))
        if cpu_model:
            params.append(('cpuModel', cpu_model))

        if result_files_patterns:
            for pattern in result_files_patterns:
                params.append(('resultFilesPattern', pattern))
        else:
            params.append(('resultFilesPattern', ''))

        if priority:
            params.append(('priority', priority))

        (invalidOption, files) = self._handle_options(run, params, limits)
        opened_files.extend(files)
        if invalidOption:
            raise WebClientError('Command {0} contains option "{1}" that is not usable with the webclient.'\
                .format(run.options, invalidOption))

        params.append(('groupId', str(self._group_id)))
        if meta_information:
            params.append(('metaInformation', meta_information))

        # prepare request
        headers = {"Accept": "text/plain"}
        path = "runs/"
        (response, statusCode) = self._request("POST", path, files=params, headers=headers, \
                                             expectedStatusCodes=[200, 412], user_pwd=user_pwd)

        for opened_file in opened_files:
            opened_file.close()

        # program files or required files given as hash value are not known by the cloud system
        if statusCode == 412:
            if counter >= 1:
                raise WebClientError(
                    'Files still missing on server for run {0} even after uploading them:\n{1}'
                    .format(run.identifier, response))
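            # Note: zlib.compress() emits zlib-wrapped deflate (RFC 1950), which is
            # what the HTTP "deflate" content-coding declared below expects.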
            headers = {
                "Content-Type": "application/octet-stream",
                "Content-Encoding": "deflate"
            }
            filePath = "files/"

            # upload all used program files
            for programPath in run.sourcefiles:
                with open(programPath, 'rb') as programFile:
                    compressedProgramText = zlib.compress(
                        programFile.read(), 9)
                    self._request('POST', filePath, data=compressedProgramText, headers=headers,\
                                   expectedStatusCodes=[200, 204], user_pwd=user_pwd)

            # upload all required files
            for required_file_path in required_files:
                with open(required_file_path, 'rb') as required_file:
                    compressed_required_file = zlib.compress(
                        required_file.read(), 9)
                    self._request('POST', filePath, data=compressed_required_file, headers=headers,\
                                   expectedStatusCodes=[200, 204], user_pwd=user_pwd)

            # retry submission of run
            return self._submit(run, limits, cpu_model, required_files,
                                result_files_patterns, meta_information,
                                priority, user_pwd, svn_branch, svn_revision,
                                counter + 1)

        else:
            try:
                run_id = response.decode("UTF-8")
            except UnicodeDecodeError as e:
                raise WebClientError(
                    'Malformed response from server while submitting run {0}:\n{1}'
                    .format(run.identifier, response)) from e
            if not VALID_RUN_ID.match(run_id):
                raise WebClientError(
                    'Malformed response from server while submitting run {0}:\n{1}'
                    .format(run.identifier, run_id))
            logging.debug('Submitted run with id %s', run_id)
            return self._create_and_add_run_future(run_id)
Example #52
import zlib, base64

file1 = open('test.txt', 'r')
text = file1.read()
file1.close()

code = base64.b64encode(zlib.compress(text.encode('utf-8'), 9))
code = code.decode('utf-8')

f = open('compressed.txt', 'w')
f.write(code)
f.close()
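Restoring the original text reverses both steps; a minimal sketch, assuming compressed.txt was written by the script above:

import zlib, base64

f = open('compressed.txt', 'r')
code = f.read()
f.close()

text = zlib.decompress(base64.b64decode(code)).decode('utf-8')
print(text)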
Example #53
	def do_GET(self):
		if self.path.find('?') < 0:
			if options.hide:
				self.send_response(404)
				self.send_header('Content-length', '3')
				self.send_header("Content-type", "text/html")
				self.end_headers()
				self.wfile.write('404')
			else:
				self.send_response(302, "Moved")
				self.send_header('Content-length', '0')
				self.send_header("Content-type", "text/html")
				self.send_header("Location", 'http://hstop.berlios.de/')
				self.end_headers()
			return
		try:
			(stuff, args) = self.path.split('?',1)
		except ValueError: ## dummy
			args = self.path
		arglist = cgi.parse_qs(args)
		try:
			s = urllib.unquote(arglist['i'][0])
			sitem = sessionlist.get(s)
			if not sitem:
				zip = True
				if arglist.has_key('z'):
					zip = False
				sessionlist.add(s, arglist['t'][0], arglist['h'][0], arglist['p'][0], self.client_address, zip)
				sitem = sessionlist.get(s)
		except KeyError:
			s = None
			sitem = None

		if sitem and sitem.work:
			try:
				item = sitem.q.qin.get(True, QUEUE_TIMEOUT)
			except (Queue.Empty, ):
				item = None
				
			if item:
				try:
					while len(item) < REQUES_MAX_SIZE:
						item = item + sitem.q.qin.get(False)
				except (Queue.Empty, TypeError ):
					pass
				if sitem.zipit:
					# Compress, but only send the compressed form if it is actually smaller.
					item1 = zlib.compress(item, 9)
					if len(item1) < len(item):
						item = item1
						item1 = None
				#item = httpencode(item)
				self.send_response(200)
				self.send_header('Content-length', str(len(item)))
				self.end_headers()
				#print 'snd: ' , item.strip()
				self.wfile.write(item)
				
			else:
				self.send_response(200)
				self.send_header('Content-length', '0')
				self.end_headers()
		else:
			self.send_response(404)
			self.send_header('Content-length', '3')
			self.send_header("Content-type", "text/html")
			self.end_headers()
			self.wfile.write('404')
			if sitem:
				sitem.clean()
Example #54
    def save(self):
        os = BytesIO()
        writeU8(os, 25)  # Version

        #flags
        flags = 0x00
        if self.pos[1] < -1:
            flags |= 0x01  #is_underground
        flags |= 0x02  #day_night_differs
        flags |= 0x04  #lighting_expired
        flags |= 0x08  #generated
        writeU8(os, flags)

        writeU8(os, 2)  # content_width
        writeU8(os, 2)  # params_width

        cbuffer = BytesIO()
        # Bulk node data
        content = self.content
        k = 0
        nimap = {}
        rev_nimap = []
        first_free_content = 0
        for z in range(16):
            for y in range(16):
                for x in range(16):
                    #writeU16(cbuffer, content[k])
                    c = content[k]
                    if c in nimap:
                        writeU16(cbuffer, nimap[c])
                    else:
                        nimap[c] = first_free_content
                        writeU16(cbuffer, first_free_content)
                        rev_nimap.append(c)
                        first_free_content += 1
                    k += 1
                k += (256 - 16)
            k += (16 - 16 * 256)
        param1 = self.param1
        k = 0
        for z in range(16):
            for y in range(16):
                for x in range(16):
                    writeU8(cbuffer, param1[k])
                    k += 1
                k += (256 - 16)
            k += (16 - 16 * 256)
        param2 = self.param2
        k = 0
        for z in range(16):
            for y in range(16):
                for x in range(16):
                    writeU8(cbuffer, param2[k])
                    k += 1
                k += (256 - 16)
            k += (16 - 16 * 256)
        os.write(zlib.compress(cbuffer.getvalue()))

        # Nodemeta
        meta = self.metadata

        cbuffer = BytesIO()
        writeU8(cbuffer, 1)  # Version
        writeU16(cbuffer, len(meta))
        for pos, data in meta.items():
            writeU16(cbuffer, (pos[2] << 8) | (pos[1] << 4) | pos[0])
            writeU32(cbuffer, len(data[0]))
            for name, val in data[0].items():
                writeString(cbuffer, name)
                writeLongString(cbuffer, str(val))
            serialize_inv(cbuffer, data[1])
        os.write(zlib.compress(cbuffer.getvalue()))

        # Static objects
        writeU8(os, 0)  # Version
        writeU16(os, 0)  # Number of objects

        # Timestamp
        writeU32(os, 0xffffffff)  # BLOCK_TIMESTAMP_UNDEFINED

        # Name-ID mapping
        writeU8(os, 0)  # Version
        writeU16(os, len(rev_nimap))
        for i in range(len(rev_nimap)):
            writeU16(os, i)
            writeString(os, self.name_id_mapping[rev_nimap[i]])

        # Node timer
        writeU8(os, 2 + 4 + 4)  # Timer data len
        writeU16(os, len(self.timers))  # Number of timers
        if len(self.timers) > 0:
            logger.info('wrote ' + str(len(self.timers)) + ' node timers')
        for i in range(len(self.timers)):
            writeU16(os, self.timers[i][0])
            writeU32(os, self.timers[i][1])
            writeU32(os, self.timers[i][2])

        return os.getvalue()
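Because save() emits two back-to-back zlib streams embedded in otherwise raw data, a reader cannot simply call zlib.decompress() on the whole blob; it has to use a decompressobj and its unused_data to find where each stream ends. A sketch of that idiom (illustrative, not the original parser):

import zlib

def read_zlib_stream(buf, offset):
    # Decompress one zlib stream starting at `offset` inside `buf`; return
    # the payload and the offset just past the end of that stream.
    d = zlib.decompressobj()
    payload = d.decompress(buf[offset:])
    consumed = len(buf) - offset - len(d.unused_data)
    return payload, offset + consumed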
Example #55
# Learning to use the zlib compression library
# Let's experiment and try to figure it out...
import zlib

# We need a 'b' in front of the string to convert it to bytes,
# because it turns out that the compress() method only works
# with bytes, not with strings.
#data = b'The quick brown fox jumps over the lazy dog'

# The encode() method converts a string to bytes, just like
# putting 'b' in front of the string converts it to bytes.
data = 'The quick brown fox jumps over the lazy dog'
data = data.encode()

# compress the data
data_compressed = zlib.compress(data)

print(data_compressed)

# uncompress the data
data_uncompressed = zlib.decompress(data_compressed)

# decompress returns a bytes object, but we can convert that to
# a string by using the decode() method.
data_uncompressed = data_uncompressed.decode()
print(data_uncompressed)


# Let's write a function to compress a file
def comp106():
    filename = input('Enter name of file to be compressed: ')
    # The original example breaks off here; a minimal completion (an
    # assumption: write the compressed bytes to '<filename>.zlib'):
    with open(filename, 'rb') as f_in:
        compressed = zlib.compress(f_in.read())
    with open(filename + '.zlib', 'wb') as f_out:
        f_out.write(compressed)
Example #56
    def write(self, writer):
        data = zlib.compress(self.data)
        writer.write_uint32(len(data))
        writer.write(data)
Example #57
def verify_processed_data(
    mocked_args, monkeypatch, spark, s3_prefix_adg, adg_output_key
):
    tag_set = TAG_SET_FULL if mocked_args.snapshot_type == SNAPSHOT_TYPE_FULL else TAG_SET_INCREMENTAL
    tbl_name = "core_contract"
    collection_location = "core"
    collection_name = "contract"
    test_data = b'{"name":"abcd"}\n{"name":"xyz"}'
    target_object_key = f"${{file_location}}/{mocked_args.snapshot_type}/{RUN_TIME_STAMP}/{collection_location}/{collection_name}/part-00000"
    s3_client = boto3.client("s3", endpoint_url=MOTO_SERVER_URL)
    s3_resource = boto3.resource("s3", endpoint_url=MOTO_SERVER_URL)
    s3_client.create_bucket(Bucket=S3_HTME_BUCKET)
    s3_client.create_bucket(Bucket=S3_PUBLISH_BUCKET)
    s3_client.put_object(
        Body=zlib.compress(test_data),
        Bucket=S3_HTME_BUCKET,
        Key=f"{S3_PREFIX}/{DB_CORE_CONTRACT_FILE_NAME}",
        Metadata={
            "iv": "123",
            "ciphertext": "test_ciphertext",
            "datakeyencryptionkeyid": "123",
        },
    )
    monkeypatch_with_mocks(monkeypatch)
    generate_dataset_from_htme.main(
        spark,
        s3_client,
        S3_HTME_BUCKET,
        SECRETS_COLLECTIONS,
        KEYS_MAP,
        RUN_TIME_STAMP,
        S3_PUBLISH_BUCKET,
        PUBLISHED_DATABASE_NAME,
        mocked_args,
        s3_resource,
    )
    assert len(s3_client.list_buckets()["Buckets"]) == 2
    assert (
        s3_client.get_object(Bucket=S3_PUBLISH_BUCKET, Key=target_object_key)["Body"]
        .read()
        .decode()
        .strip()
        == test_data.decode()
    )
    assert (
        s3_client.get_object_tagging(Bucket=S3_PUBLISH_BUCKET, Key=target_object_key)[
            "TagSet"
        ]
        == tag_set
    )
    assert tbl_name in [
        x.name for x in spark.catalog.listTables(PUBLISHED_DATABASE_NAME)
    ]
    assert (
        CORRELATION_ID
        in s3_client.get_object(Bucket=S3_PUBLISH_BUCKET, Key=adg_output_key)["Body"]
        .read()
        .decode()
    )
    assert (
        s3_prefix_adg
        in s3_client.get_object(Bucket=S3_PUBLISH_BUCKET, Key=adg_output_key)["Body"]
        .read()
        .decode()
    )
Example #58
    # btc_jpy = zaif_btc_jpy_api_response['last_price']
    # Convert to Japanese yen
    open_jpy = open * btc * btc_jpy
    high_jpy = high * btc * btc_jpy
    low_jpy = low * btc * btc_jpy
    close_jpy = close * btc * btc_jpy
    return round(open_jpy, 10), round(high_jpy, 10), round(low_jpy, 10), round(close_jpy, 10)


def get_market_price_doge2usd(open, high, low, close, btc):
    # Point at the blockchain.info ticker API
    blockchain_btc_usd_api_url = "https://blockchain.info/ticker"
    blockchain_btc_usd_api_response = requests.get(
        blockchain_btc_usd_api_url).json()
    btc_usd = blockchain_btc_usd_api_response['USD']['last']
    # Convert to US dollars
    open_usd = open * btc * btc_usd
    high_usd = high * btc * btc_usd
    low_usd = low * btc * btc_usd
    close_usd = close * btc * btc_usd
    return round(open_usd, 10), round(high_usd, 10), round(low_usd, 10), round(close_usd, 10)


if __name__ == '__main__':
    path_w = './chart_data.zlib.txt'
    d = chart_db.chart_db_select(DBFILE)
    s = json.dumps(d)
    z = zlib.compress(s.encode("utf-8"))
    with open(path_w, mode='w') as f:
        f.write(z.hex())
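Reading the dump back reverses those last four lines; a sketch assuming the file written above:

import json, zlib

with open('./chart_data.zlib.txt', mode='r') as f:
    z = bytes.fromhex(f.read())
d = json.loads(zlib.decompress(z).decode('utf-8'))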
Example #59
    def encode(self, stream):
        return zlib.compress(stream)
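For symmetry, the matching decoder (an assumed counterpart, not shown in the original source) is the inverse one-liner:

    def decode(self, stream):
        # Inverse of encode(): inflate the zlib-compressed stream.
        return zlib.decompress(stream)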
Example #60
        if bytes:
            data = self.data[:bytes]
            self.data = self.data[bytes:]
        else:
            data = self.data
            self.data = ""
        self.offset = self.offset + len(data)
        return data
    def readline(self):
        # make sure we have an entire line
        while self.zip and "\n" not in self.data:
            self.__fill(len(self.data) + 512)
        i = string.find(self.data, "\n") + 1
        if i <= 0:
            return self.read()
        return self.read(i)
    def readlines(self):
        lines = []
        while 1:
            s = self.readline()
            if not s:
                break
            lines.append(s)
        return lines
#
# try it out
data = open("samples/sample.txt").read()
data = zlib.compress(data)
file = ZipInputStream(StringIO.StringIO(data))
for line in file.readlines():
    print line[:-1]