Example #1
    def loop(self):
        topic = self.socket.recv()
        if topic == b"ping":
            self.socket.send(b"Hello, World!")
        elif topic == b"login":
            args = unpack(self.socket.recv())
            res = self.login(**args)
            self.socket.send(pack(res))
        elif topic == b"logout":
            self.logout()
            self.socket.send(b"ok")
        elif topic == b"shutdown":
            self.done = True
        elif topic == b"query":
            tr, args = unpack(self.socket.recv())
            try:
                res = Query(tr).send(**args)
                self.socket.send(b"ok", zmq.SNDMORE)
                self.socket.send(pack(res))
            except Exception as e:
                log.critical(e.args)
                self.socket.send(b"error", zmq.SNDMORE)
                self.socket.send(pack(e.args))
        else:
            pass  # unknown topic: ignore
Example #2
def sign_attached(message, private_key, chunk_size, major_version=None):
    if major_version is None:
        major_version = DEFAULT_MAJOR_VERSION

    output = io.BytesIO()
    public_key = private_key[32:]
    header_hash = write_header(public_key, 1, output, major_version)

    # Write the chunks.
    for chunknum, chunk, final_flag in chunks_loop(message, chunk_size,
                                                   major_version):
        packetnum_64 = chunknum.to_bytes(8, 'big')
        if major_version == 1:
            final_flag_byte = b""
        else:
            final_flag_byte = b"\x01" if final_flag else b"\x00"
        payload_digest = hashlib.sha512(header_hash + packetnum_64 +
                                        final_flag_byte + chunk).digest()
        payload_sig_text = b"saltpack attached signature\0" + payload_digest
        payload_sig = nacl.bindings.crypto_sign(payload_sig_text, private_key)
        detached_payload_sig = payload_sig[:64]
        if major_version == 1:
            packet = [
                detached_payload_sig,
                chunk,
            ]
        else:
            packet = [
                final_flag,
                detached_payload_sig,
                chunk,
            ]
        umsgpack.pack(packet, output)

    return output.getvalue()
Example #3
def main():
    args = get_args()
    with open(args['input'], 'rb') as f:
        payload = umsgpack.unpack(f)

    keys = ['lobby', 'rule', 'map', 'weapon', 'result', 'kill', 'death',
            'rank', 'rank_exp', 'rank_after', 'rank_exp_after', 'link_url']
    for key in keys:
        value = args.get(key)
        if not value:
            continue
        if value == 'DELETE':
            prev_value = payload.pop(key)
        else:
            prev_value = payload.get(key, '')
            payload[key] = args[key]
        print('Modified %s : %s -> %s' % (key, str(prev_value), str(value)))

    # Gears. Primary ability is only supported.
    gear_keys = ['clothing', 'headgear', 'shoes']
    for key in gear_keys:
        value = args.get(key)
        if not value:
            continue
        if value == 'DELETE':
            prev_value = payload['gears'].pop(key)
        else:
            prev_value = payload['gears'][key].get('primary_ability', '')
            payload['gears'][key]['primary_ability'] = args[key]
        print('Modified %s : %s -> %s' % (key, str(prev_value), str(value)))

    output = args.get('output') or args['input']
    with open(output, 'wb') as f:
        umsgpack.pack(payload, f)
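
The script relies on a get_args helper that returns a plain dict. A minimal sketch of what it might look like, assuming argparse with one optional flag per editable key (the wiring below is an assumption, not the project's actual parser):

import argparse

def get_args():
    # Hypothetical sketch: each field is an optional flag; passing the
    # sentinel string 'DELETE' removes that field from the payload.
    parser = argparse.ArgumentParser(description='Edit a msgpack payload file.')
    parser.add_argument('input', help='msgpack file to modify')
    parser.add_argument('--output', help='defaults to overwriting the input')
    for key in ['lobby', 'rule', 'map', 'weapon', 'result', 'kill', 'death',
                'rank', 'rank_exp', 'rank_after', 'rank_exp_after', 'link_url',
                'clothing', 'headgear', 'shoes']:
        parser.add_argument('--' + key)
    return vars(parser.parse_args())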
Example #4
def sign_detached(message, private_key):
    output = io.BytesIO()
    public_key = private_key[32:]
    header_hash = write_header(public_key, 2, output)
    message_digest = hashlib.sha512(header_hash + message).digest()
    message_sig_text = b"saltpack detached signature\0" + message_digest
    message_sig = nacl.bindings.crypto_sign(message_sig_text, private_key)
    detached_message_sig = message_sig[:64]
    umsgpack.pack(detached_message_sig, output)
    return output.getvalue()
Example #5
    def _send_pack(self):
        if self.isclosed:
            return False
        try:
            msgpack.pack(self._pack, self._io)
            self._io.flush()
            return True
        except Exception as e:
            # print("Send package failure", e)
            return False
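
A receiving counterpart would stream-decode from the same file object. A minimal sketch, assuming msgpack-python's Unpacker and a lazily created self._unpacker attribute (both names here are hypothetical):

    def _recv_pack(self):
        if self.isclosed:
            return None
        try:
            # Unpacker pulls bytes from the file object on demand, so one
            # instance can decode any number of consecutive packages.
            if getattr(self, '_unpacker', None) is None:
                self._unpacker = msgpack.Unpacker(self._io, raw=False)
            return self._unpacker.unpack()
        except Exception:
            return None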
Example #6
    def serialize(self, path):
        all_weights = []
        for w in self.worms:
            sd = w.get_state_dict()
            worm_weights = {'_position': w.get_position()}
            for k in sd:
                weight = sd[k].detach().numpy().tolist()
                worm_weights[k] = weight
            all_weights.append(worm_weights)
        with open(path, 'wb') as f:
            umsgpack.pack(all_weights, f)
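
Reading the weights back is a single umsgpack.unpack call; rebuilding tensors is left to the caller. A sketch of the inverse (the torch.tensor reconstruction is an assumption about how the lists would be consumed):

import torch
import umsgpack

def deserialize(path):
    # One unpack call returns the whole list written by serialize().
    with open(path, 'rb') as f:
        all_weights = umsgpack.unpack(f)
    # Rebuild tensors from the nested lists; '_position' stays as-is.
    return [{k: (v if k == '_position' else torch.tensor(v))
             for k, v in worm.items()}
            for worm in all_weights]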
Example #7
def _set_database(
    dict_,  # type: Dict[str, Dict[bytes, List[Union[str, int]]]]
    routing_table,  # type: Dict[bytes, BaseConnection]
    proto  # type: Protocol
):  # type: (...) -> None
    for id_, node in routing_table.items():
        if id_ not in dict_[proto.encryption]:
            dict_[proto.encryption][id_] = list(node.addr)

    with open(_datafile, 'wb') as database:
        database.seek(0)
        pack(dict_, database)
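
The matching read side is one unpack of the same file. A hedged sketch (the empty-dict fallback for a missing database file is an assumption):

def _get_database():
    # type: () -> Dict[str, Dict[bytes, List[Union[str, int]]]]
    try:
        with open(_datafile, 'rb') as database:
            return unpack(database)
    except (IOError, OSError):
        return {}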
Example #8
File: statink.py Project: ykws/IkaLog
    def write_payload_to_file(self, payload, filename=None):
        if filename is None:
            t = datetime.now().strftime("%Y%m%d_%H%M")
            filename = os.path.join("/tmp", "statink_%s.msgpack" % t)

        try:
            f = open(filename, "wb")
            umsgpack.pack(payload, f)
            f.close()
        except:
            IkaUtils.dprint("%s: Failed to write msgpack file" % self)
            IkaUtils.dprint(traceback.format_exc())
Example #9
File: statink.py Project: sebnsr/IkaLog
    def write_payload_to_file(self, payload, filename=None):
        if filename is None:
            t = datetime.now().strftime("%Y%m%d_%H%M")
            filename = os.path.join('/tmp', 'statink_%s.msgpack' % t)

        try:
            f = open(filename, 'wb')
            umsgpack.pack(payload, f)
            f.close()
        except:
            IkaUtils.dprint('%s: Failed to write msgpack file' % self)
            IkaUtils.dprint(traceback.format_exc())
Example #10
File: encoding.py Project: sbellem/raiden
def _pack_map(obj, fp):
    if len(obj) <= 15:
        fp.write(struct.pack("B", 0x80 | len(obj)))
    elif len(obj) <= 2**16 - 1:
        fp.write(b"\xde" + struct.pack(">H", len(obj)))
    elif len(obj) <= 2**32 - 1:
        fp.write(b"\xdf" + struct.pack(">I", len(obj)))
    else:
        raise umsgpack.UnsupportedTypeException("huge array")

    for k in sorted(obj.iterkeys()):
        umsgpack.pack(k, fp)
        umsgpack.pack(obj[k], fp)
Example #11
def sign_detached(message, private_key, major_version=None):
    if major_version is None:
        major_version = DEFAULT_MAJOR_VERSION

    output = io.BytesIO()
    public_key = private_key[32:]
    header_hash = write_header(public_key, 2, output, major_version)
    message_digest = hashlib.sha512(header_hash + message).digest()
    message_sig_text = b"saltpack detached signature\0" + message_digest
    message_sig = nacl.bindings.crypto_sign(message_sig_text, private_key)
    detached_message_sig = message_sig[:64]
    umsgpack.pack(detached_message_sig, output)
    return output.getvalue()
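
Verification mirrors the construction above: rebuild the signature text from the header hash and the message, then let NaCl check the 64-byte detached signature. A minimal sketch, assuming the caller has already recovered header_hash and the signature from the stream:

import hashlib
import nacl.bindings

def verify_detached(message, header_hash, detached_message_sig, public_key):
    message_digest = hashlib.sha512(header_hash + message).digest()
    message_sig_text = b"saltpack detached signature\0" + message_digest
    # crypto_sign_open raises BadSignatureError if the signature is invalid.
    nacl.bindings.crypto_sign_open(
        detached_message_sig + message_sig_text, public_key)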
Example #12
File: encoding.py Project: ms83/raiden
def _pack_map(obj, fp):
    if len(obj) <= 15:
        fp.write(struct.pack("B", 0x80 | len(obj)))
    elif len(obj) <= 2 ** 16 - 1:
        fp.write(b"\xde" + struct.pack(">H", len(obj)))
    elif len(obj) <= 2 ** 32 - 1:
        fp.write(b"\xdf" + struct.pack(">I", len(obj)))
    else:
        raise umsgpack.UnsupportedTypeException("huge array")

    for k in sorted(obj.iterkeys()):
        umsgpack.pack(k, fp)
        umsgpack.pack(obj[k], fp)
Example #13
def write_header(public_key, mode, output, major_version):
    nonce = os.urandom(32)
    header = [
        "saltpack",
        [major_version, CURRENT_MINOR_VERSIONS[major_version]],
        mode,
        public_key,
        nonce,
    ]
    header_bytes = umsgpack.packb(header)
    header_hash = hashlib.sha512(header_bytes).digest()
    umsgpack.pack(header_bytes, output)
    return header_hash
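
The header is serialized twice on purpose: the packed header list is itself packed as a bin object, so a reader can hash exactly the bytes that were hashed on the write side. A sketch of the inverse, assuming a stream positioned at the start of a message:

import hashlib
import umsgpack

def read_header(stream):
    # The outer unpack yields the raw header bytes; hashing them reproduces
    # header_hash, and the inner unpackb recovers the
    # ["saltpack", [major, minor], mode, public_key, nonce] list.
    header_bytes = umsgpack.unpack(stream)
    header_hash = hashlib.sha512(header_bytes).digest()
    header = umsgpack.unpackb(header_bytes)
    return header, header_hash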
Example #14
def write_header(public_key, mode, output):
    nonce = os.urandom(32)
    header = [
        "saltpack",
        [1, 0],
        mode,
        public_key,
        nonce,
    ]
    header_bytes = umsgpack.packb(header)
    header_hash = hashlib.sha512(header_bytes).digest()
    umsgpack.pack(header_bytes, output)
    return header_hash
Example #15
def store_log(name, version):
    """Store information about release.

    :param name: name of soft
    :param version: soft version
    :return: None
    """
    data = extract_data()
    version_list = data.get(name, [])
    version_list.append(version)

    with open(database, 'wb') as fh:
        data[name] = version_list
        umsgpack.pack(data, fh)
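
store_log depends on an extract_data helper that loads the existing database. A plausible sketch (the missing-file fallback to an empty dict is an assumption):

def extract_data():
    """Load the release database, or an empty one on first run."""
    try:
        with open(database, 'rb') as fh:
            return umsgpack.unpack(fh)
    except (IOError, OSError):
        return {}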
Example #16
    def put_snapshot(self, snapshot):
        """Adds a new snapshot index file to the storage backend

        :type snapshot: models.Snapshot
        """
        path = "snapshots/" + str(uuid.uuid4())
        contents = io.BytesIO()
        umsgpack.pack("snapshot", contents)
        umsgpack.pack(
            {"date": snapshot.date.timestamp(), "root": snapshot.root_id,
             "path": snapshot.path, }, contents)
        contents.seek(0)
        to_upload = self.encrypter.encrypt_bytes(
            self.compress_bytes(contents.getbuffer()))
        self.storage.upload_file(path, util.BytesReader(to_upload))
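
Reading a snapshot back reverses each step: download, decrypt, decompress, then two sequential unpack calls on the same buffer (first the "snapshot" tag, then the info dict). A heavily hedged sketch; download_file, decrypt_bytes and decompress_bytes are assumed counterparts to the calls used above:

    def get_snapshot(self, path):
        raw = self.storage.download_file(path)
        contents = io.BytesIO(
            self.decompress_bytes(self.encrypter.decrypt_bytes(raw)))
        assert umsgpack.unpack(contents) == "snapshot"
        # The second object is the {"date", "root", "path"} dict.
        return umsgpack.unpack(contents)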
Example #17
    def query(self, tr, **kwargs):
        if not kwargs:
            kwargs = {}
        self.socket.send(b"query", zmq.SNDMORE)
        self.socket.send(pack([tr, kwargs]))
        res = self.socket.recv()
        msg = unpack(self.socket.recv())
        if res == b"ok":
            return msg
        else:
            return None
Example #18
    def login(self, **kwargs):
        assert all(key in kwargs for key in ("id", "passwd"))
        assert all(isinstance(val, str) for val in kwargs.values())
        self.socket.send(b"login", zmq.SNDMORE)
        self.socket.send(pack(kwargs))
        res = unpack(self.socket.recv())
        if not res:
            log.info("login failed")
        else:
            log.info("login ok")
        return res
Example #19
def sign_attached(message, private_key, chunk_size):
    output = io.BytesIO()
    public_key = private_key[32:]
    header_hash = write_header(public_key, 1, output)

    packetnum = 0
    for chunk in chunks_with_empty(message, chunk_size):
        packetnum_64 = packetnum.to_bytes(8, 'big')
        payload_digest = hashlib.sha512(
            header_hash + packetnum_64 + chunk).digest()
        payload_sig_text = b"saltpack attached signature\0" + payload_digest
        payload_sig = nacl.bindings.crypto_sign(payload_sig_text, private_key)
        detached_payload_sig = payload_sig[:64]
        packet = [
            detached_payload_sig,
            chunk,
        ]
        umsgpack.pack(packet, output)
        packetnum += 1

    return output.getvalue()
Example #20
def main():
    args = get_args()
    with open(args['input'], 'rb') as f:
        payload = umsgpack.unpack(f)

    keys = ['lobby', 'rule', 'map', 'weapon', 'result', 'kill', 'death',
            'rank', 'rank_exp', 'rank_after', 'rank_exp_after', 'link_url']
    for key in keys:
        value = args.get(key)
        if not value:
            continue
        if value == 'DELETE':
            prev_value = payload.pop(key)
        else:
            prev_value = payload.get(key, '')
            payload[key] = args[key]
        print('Modified %s : %s -> %s' % (key, str(prev_value), str(value)))

    output = args.get('output') or args['input']
    with open(output, 'wb') as f:
        umsgpack.pack(payload, f)
Example #21
    def put_nowait(self, obj):
        if self.lazy_limit and self.qsize_diff < self.qsize_diff_limit:
            pass
        elif self.full():
            raise BaseQueue.Full
        else:
            self.qsize_diff = 0
        with self.lock:
            self.qsize_diff += 1
            # packb returns bytes; pack() would require a stream argument.
            msg = amqp.Message(umsgpack.packb(obj))
            return self.channel.basic_publish(msg, exchange="",
                                              routing_key=self.name)
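
The matching consumer decodes the AMQP message body with unpackb. A minimal sketch, assuming the same amqp channel as above and a basic_get that returns None when the queue is empty:

    def get_nowait(self, ack=False):
        with self.lock:
            message = self.channel.basic_get(self.name, not ack)
            if message is None:
                raise BaseQueue.Empty
            return umsgpack.unpackb(message.body)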
Example #22
import gzip
import os

import cv2
import numpy as np
import umsgpack
from tqdm import tqdm

# db_dir and output_file are assumed to be defined elsewhere (e.g. CLI args).
out = gzip.open(output_file, "wb")

subjects = os.listdir(db_dir)

mean_image = np.zeros((64, 64), dtype=np.float32)

image_files = []

subject_image_map = {s: 0 for s in subjects}

print("> Finding subjects...")
for subject in tqdm(subjects):
    for root, dirs, files in os.walk(os.path.join(db_dir, subject)):
        for file in files:
            if file.startswith("face") and file.endswith(".png"):
                image_files.append((subject, os.path.join(root, file)))
                subject_image_map[subject] += 1

num_images = len(image_files)
umsgpack.pack(num_images, out)

umsgpack.pack(subject_image_map, out)

print("> Packaging images...")
for (subject, file) in tqdm(image_files):
    img = cv2.imread(file, 0)
    mean_image += img.astype(np.float32) / num_images
    out.write(umsgpack.packb((subject, img.tolist())))

umsgpack.pack(mean_image.tolist(), out)
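
Since everything above lands on one gzip stream as consecutive msgpack objects, a reader can replay it with sequential unpack calls: the image count, the subject map, one (subject, image) pair per image, then the mean image. A sketch of the inverse (the function name is hypothetical):

import gzip
import numpy as np
import umsgpack

def load_face_db(path):
    with gzip.open(path, "rb") as f:
        num_images = umsgpack.unpack(f)
        subject_image_map = umsgpack.unpack(f)
        # The per-image records were written with packb(), which produces
        # the same bytes as pack(), so sequential unpack still works.
        images = []
        for _ in range(num_images):
            subject, img = umsgpack.unpack(f)
            images.append((subject, np.array(img, dtype=np.uint8)))
        mean_image = np.array(umsgpack.unpack(f), dtype=np.float32)
    return subject_image_map, images, mean_image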
Example #23
    def test_streaming_writer(self):
        # Try first composite test vector
        (_, obj, data) = composite_test_vectors[0]
        writer = io.BytesIO()
        umsgpack.pack(obj, writer)
        self.assertEqual(writer.getvalue(), data)
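
The read direction can be exercised the same way; a companion sketch in the style of the test above:

    def test_streaming_reader(self):
        # Mirror image: wrap the expected bytes and unpack one object
        (_, obj, data) = composite_test_vectors[0]
        reader = io.BytesIO(data)
        self.assertEqual(umsgpack.unpack(reader), obj)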
Example #24
    def backup(self):
        """Back up this entry

        Reads this entry in from the file system, creates one or more object
        payloads, and yields them to the caller for uploading to the backing
        store. The caller is expected to send the Object instance
        back into this iterator function.

        Yields: (payload_buffer, list_of_child_Object_instances)
        Caller sends: models.Object instance of the last yielded payload

        The payload_buffer is a file-like object ready for reading.
        Usually a BytesIO instance.

        Note: this sequence of operations was chosen over having this
        method upload the objects itself so that the caller may choose to
        buffer and upload objects in batch. It's also more flexible in
        several ways. E.g. while a recursive algorithm would
        have to upload items in a post-order traversal of the tree, here
        the caller is free to do a SQL query to get items ordered by any
        criteria. Like, say, all small files first and pack them together into
        a single upload.

        For directories: yields a single payload for the directory entry.
        Raises a DependencyError if one or more children do not have an
        obj already. It's the caller's responsibility to call backup() on
        entries in an order to avoid dependency issues.

        For files: yields one or more payloads for the file's contents,
        then finally a payload for the inode entry.

        IMPORTANT: every exit point from this function must either update
        this entry's obj to a non-null value, OR delete the entry before
        returning.
        """
        try:
            stat_result = os.lstat(self.path)
        except (FileNotFoundError, NotADirectoryError):
            scanlogger.info("File disappeared: {}".format(self))
            self.delete()
            return

        # If this entry is significantly different from what it looked like
        # when it was scanned, then we shouldn't try to back it up. The logic
        # for managing child references and such lives in the scan() method,
        # so delete this entry and let it get re-created next scan.
        if stat.S_IFMT(self.st_mode) != stat.S_IFMT(stat_result.st_mode):
            scanlogger.warning("File changed type since scan, deleting: "
                               "{}".format(self))
            self.delete()
            return

        self.update_stat_info(stat_result)

        if stat.S_ISREG(self.st_mode):
            # File
            chunks = []
            childobjs = []

            try:
                with self._open_file() as fobj:
                    for pos, chunk in chunker.DefaultChunker(fobj):
                        buf = io.BytesIO()
                        umsgpack.pack("blob", buf)
                        umsgpack.pack(chunk, buf)
                        buf.seek(0)
                        chunk_obj = yield (buf, [])
                        childobjs.append(chunk_obj)
                        chunks.append((pos, chunk_obj.objid))
            except FileNotFoundError:
                scanlogger.info("File disappeared: {}".format(self))
                self.delete()
                return
            except OSError as e:
                # This happens with permission denied errors
                scanlogger.exception("Error in system call when reading file "
                                     "{}".format(self))
                # In order to not crash the entire backup, we must delete
                # this entry so that the parent directory can still be backed
                # up. This code path may leave one or more objects saved to
                # the remote storage, but there's not much we can do about
                # that here. (Basically, since every exit from this method
                # must either acquire and save an obj or delete itself,
                # we have no choice)
                self.delete()
                return

            # Now construct the payload for the inode
            buf = io.BytesIO()
            umsgpack.pack("inode", buf)
            info = dict(
                size=stat_result.st_size,
                inode=stat_result.st_ino,
                uid=stat_result.st_uid,
                gid=stat_result.st_gid,
                mode=stat_result.st_mode,
                mtime=stat_result.st_mtime_ns,
                atime=stat_result.st_atime_ns,
            )
            umsgpack.pack(info, buf)
            umsgpack.pack(chunks, buf)
            buf.seek(0)

            self.obj = yield (buf, childobjs)
            scanlogger.info("Backed up file into {} objects: {}".format(
                len(chunks) + 1, self))

        elif stat.S_ISDIR(self.st_mode):
            # Directory
            # Note: backing up a directory doesn't involve reading
            # from the filesystem aside from the lstat() call from above. All
            # the information we need is already in the database.
            children = list(self.children.all().select_related("obj"))

            # This block asserts all children have been backed up before
            # entering this method. If they haven't, then the caller is in
            # error. The current backup strategy involves the caller
            # traversing nodes to back them up in an order that avoids
            # dependency issues.
            # A simplified backup strategy would be to make this method
            # recursive (using `yield from`) and then just call backup on the
            # root nodes. There's no reason I can think of that that wouldn't
            # work. Enforcing this here is just a sanity check for the current
            # backup strategy.
            if any(c.obj is None for c in children):
                raise DependencyError(
                    "{} depends on these paths, but they haven't been "
                    "backed up yet. This is a bug. {}"
                    "".format(
                        self.printablepath,
                        ", ".join(c.printablepath for c in children
                                  if c.obj is None),
                    ))

            buf = io.BytesIO()
            umsgpack.pack("tree", buf)
            info = dict(
                uid=stat_result.st_uid,
                gid=stat_result.st_gid,
                mode=stat_result.st_mode,
                mtime=stat_result.st_mtime_ns,
                atime=stat_result.st_atime_ns,
            )
            umsgpack.pack(info, buf)
            umsgpack.pack(
                # We have to store the original binary representation of
                # the filename or msgpack will error at filenames with
                # bad encodings
                [(os.fsencode(c.name), c.obj.objid) for c in children],
                buf,
            )
            buf.seek(0)

            self.obj = yield (buf, (c.obj for c in children))

            scanlogger.info("Backed up dir: {}".format(self))

        else:
            scanlogger.warning(
                "Unknown file type, not backing up {}".format(self))
            self.delete()
            return

        self.save()
        return
Example #25
File: backup.py Project: brownan/backathon
def backup_iterator(fsentry, inline_threshold=2**21):
    """Back up an FSEntry object

    :type fsentry: models.FSEntry
    :param inline_threshold: Threshold in bytes below which file contents are
        inlined into the inode payload.

    This is a generator function. Its job is to take the given models.FSEntry
    object and create the models.Object object for the local cache database
    and corresponding payload to upload to the remote repository. Since some
    types of filesystem entries may be split across multiple objects (e.g.
    large files), this function may yield more than one Object and payload
    for a single FSEntry.

    This function's created Object and ObjectRelation instances are not saved to
    the database, as this function is not responsible for determining the
    object ids. Once yielded, the caller will generate the object id from the
    payload, and will do one of two things:

    1. If the objid does not yet exist in the Object table: Update the Object
    and ObjectRelation instances with the generated object id and save them
    to the database, atomically with uploading the payload to the repository.
    2. If the objid *does* exist in the Object table: do nothing

    Either way, the (saved or fetched) Object with a set objid is sent back
    into this generator function so it can be used in a subsequent
    ObjectRelation entry.

    This function is responsible for updating the FSEntry.obj foreign key field
    with the sent object after yielding a payload.

    Yields: (payload, Object, [ObjectRelation list])
    Caller sends: A models.Object instance with obj.objid set

    The payload is a file-like object ready for reading. Usually a BytesIO
    instance.

    For directories: yields a single payload for the directory entry.
    Raises a DependencyError if one or more children do not have an
    obj already. It's the caller's responsibility to call this function on
    entries in an order to avoid dependency issues.

    For files: yields one or more payloads for the file's contents,
    then finally a payload for the inode entry.

    IMPORTANT: every exit point from this function must either update
    this entry's obj field to a non-null value, OR delete the entry before
    returning. It is an error to leave an entry in the database with the
    obj field still null.
    """
    try:
        stat_result = os.lstat(fsentry.path)
    except (FileNotFoundError, NotADirectoryError):
        logger.info("File disappeared: {}".format(fsentry))
        fsentry.delete()
        return

    fsentry.update_stat_info(stat_result)

    obj = models.Object()
    relations = []  # type: list[models.ObjectRelation]

    if stat.S_ISREG(fsentry.st_mode):
        # Regular File

        # Fill in the Object
        obj.type = "inode"
        obj.file_size = stat_result.st_size
        obj.last_modified_time = datetime.datetime.fromtimestamp(
            stat_result.st_mtime,
            tz=pytz.UTC,
        )

        # Construct the payload
        inode_buf = io.BytesIO()
        umsgpack.pack("inode", inode_buf)
        info = dict(
            size=stat_result.st_size,
            inode=stat_result.st_ino,
            uid=stat_result.st_uid,
            gid=stat_result.st_gid,
            mode=stat_result.st_mode,
            mtime=stat_result.st_mtime_ns,
            atime=stat_result.st_atime_ns,
        )
        umsgpack.pack(info, inode_buf)

        try:
            with _open_file(fsentry.path) as fobj:
                if stat_result.st_size < inline_threshold:
                    # If the file size is below this threshold, put the contents
                    # as a blob right in the inode object. Don't bother with
                    # separate blob objects
                    umsgpack.pack(("immediate", fobj.read()), inode_buf)

                else:
                    # Break the file's contents into chunks and upload
                    # each chunk individually
                    chunk_list = []
                    for pos, chunk in chunker.FixedChunker(fobj):
                        buf = io.BytesIO()
                        umsgpack.pack("blob", buf)
                        umsgpack.pack(chunk, buf)
                        buf.seek(0)
                        chunk_obj = yield (buf, models.Object(type="blob"), [])
                        chunk_list.append((pos, chunk_obj.objid))
                        relations.append(
                            models.ObjectRelation(child=chunk_obj))
                    umsgpack.pack(("chunklist", chunk_list), inode_buf)

        except FileNotFoundError:
            logger.info("File disappeared: {}".format(fsentry))
            fsentry.delete()
            return
        except OSError:
            # This happens with permission denied errors
            logger.error("Error in system call when reading file "
                         "{}".format(fsentry))
            # In order to not crash the entire backup, we must delete
            # this entry so that the parent directory can still be backed
            # up. This code path may leave one or more objects saved to
            # the remote storage, but there's not much we can do about
            # that here. (Basically, since every exit from this method
            # must either acquire and save an obj or delete itself,
            # we have no choice)
            fsentry.delete()
            return

        inode_buf.seek(0)

        # Pass the object and payload to the caller for uploading
        fsentry.obj = yield (inode_buf, obj, relations)
        logger.info("Backed up file into {} objects: {}".format(
            len(relations) + 1, fsentry))

    elif stat.S_ISDIR(fsentry.st_mode):
        # Directory
        # Note: backing up a directory doesn't involve reading
        # from the filesystem aside from the lstat() call from above. All
        # the information we need is already in the database.
        children = list(fsentry.children.all())

        # This block asserts all children have been backed up before
        # entering this method. If they haven't, then the caller is in
        # error. The current backup strategy involves the caller
        # traversing nodes to back them up in an order that avoids
        # dependency issues.
        # A simplified backup strategy would be to make this method
        # recursive (using `yield from`) and then just call backup on the
        # root nodes. There's no reason I can think of that that wouldn't
        # work. Enforcing this here is just a sanity check for the current
        # backup strategy.
        if any(c.obj_id is None for c in children):
            raise DependencyError(
                "{} depends on these paths, but they haven't been "
                "backed up yet. This is a bug. {}"
                "".format(
                    fsentry.printablepath,
                    ", ".join(c.printablepath for c in children
                              if c.obj_id is None),
                ))

        obj.type = "tree"
        obj.last_modified_time = datetime.datetime.fromtimestamp(
            stat_result.st_mtime,
            tz=pytz.UTC,
        )
        relations = [
            models.ObjectRelation(
                child_id=c.obj_id,
                # Names are stored in the object relation model for
                # purposes of searching and directory listing. It's stored in
                # a utf-8 encoding with invalid bytes removed to make
                # searching and indexing possible, but the payload has the
                # original filename in it.
                name=os.fsencode(c.name).decode("utf-8", errors="ignore"),
            ) for c in children
        ]

        buf = io.BytesIO()
        umsgpack.pack("tree", buf)
        info = dict(
            uid=stat_result.st_uid,
            gid=stat_result.st_gid,
            mode=stat_result.st_mode,
            mtime=stat_result.st_mtime_ns,
            atime=stat_result.st_atime_ns,
        )
        umsgpack.pack(info, buf)
        umsgpack.pack(
            # We have to store the original binary representation of
            # the filename or msgpack will error at filenames with
            # bad encodings
            [(os.fsencode(c.name), c.obj_id) for c in children],
            buf,
        )
        buf.seek(0)

        fsentry.obj = yield (buf, obj, relations)

        logger.info("Backed up dir: {}".format(fsentry))

    elif stat.S_ISLNK(fsentry.st_mode):
        buf = io.BytesIO()
        umsgpack.pack("symlink", buf)
        info = dict(
            uid=stat_result.st_uid,
            gid=stat_result.st_gid,
            mode=stat_result.st_mode,
            mtime=stat_result.st_mtime_ns,
            atime=stat_result.st_atime_ns,
        )
        umsgpack.pack(info, buf)

        # Symlinks may be invalid utf-8 sequences. Make sure we re-encode
        # them to their original byte representation before saving
        umsgpack.pack(os.fsencode(os.readlink(fsentry.path)), buf)

        buf.seek(0)
        obj = models.Object()
        obj.type = "symlink"
        obj.last_modified_time = datetime.datetime.fromtimestamp(
            stat_result.st_mtime,
            tz=pytz.UTC,
        )
        fsentry.obj = yield (buf, obj, [])

    else:
        logger.warning("Unknown file type, not backing up {}".format(fsentry))
        fsentry.delete()
        return

    fsentry.save()
    return
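
The send-back protocol in the docstring is easiest to see from the caller's side. A simplified driver sketch; compute_objid and upload are hypothetical stand-ins for the caller's real id-generation and storage steps:

def drive_backup(fsentry):
    iterator = backup_iterator(fsentry)
    try:
        payload, obj, relations = next(iterator)
        while True:
            obj.objid = compute_objid(payload)  # hypothetical
            upload(obj.objid, payload)          # hypothetical
            # (saving obj and relations to the database is omitted here)
            payload, obj, relations = iterator.send(obj)
    except StopIteration:
        pass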
Example #26
try:
    from uio import BytesIO
    import umsgpack as msgpack
except ImportError:
    try:
        from io import BytesIO
        import msgpack
    except ImportError:
        print("SKIP")
        raise SystemExit

b = BytesIO()
msgpack.pack(False, b)
print(b.getvalue())

b = BytesIO()
msgpack.pack({"a": (-1, 0, 2, [3, None], 128)}, b)
print(b.getvalue())

# pack to a small-int not allowed
try:
    msgpack.pack(123, 1)
except (AttributeError, OSError):  # CPython and uPy have different errors
    print("Exception")

# pack to an object not allowed
try:
    msgpack.pack(123, {})
except (AttributeError, OSError):  # CPython and uPy have different errors
    print("Exception")
Example #27
import sys
import json
import umsgpack

with open(sys.argv[1], "rt") as f:
    obj = json.load(f)

with open(sys.argv[2], "wb") as f:
    umsgpack.pack(obj, f)
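
The reverse direction is just as short. A sketch converting msgpack back to JSON (the unpacked object must be JSON-representable for this to round-trip):

import sys
import json
import umsgpack

with open(sys.argv[1], "rb") as f:
    obj = umsgpack.unpack(f)

with open(sys.argv[2], "wt") as f:
    json.dump(obj, f)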

Example #28
    @classmethod
    def _append(cls, flag, key, value, fp, **kwargs):
        fp.seek(0, 2)  # append: seek to end of file
        entry = (flag, key, value)
        umsgpack.pack(entry, fp)
        if kwargs.pop('flush', True):
            cls._flush(fp)
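
Replaying the append-only log written by _append is a loop of unpack calls until the stream runs out; u-msgpack-python signals that with InsufficientDataException. A hedged companion sketch:

    @classmethod
    def _entries(cls, fp):
        # Hypothetical reader: yield every (flag, key, value) entry in order.
        fp.seek(0)
        while True:
            try:
                yield umsgpack.unpack(fp)
            except umsgpack.InsufficientDataException:
                break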
Example #29
#!/usr/bin/env python

import umsgpack

packed = umsgpack.packb({u'compact': True, u'schema': 0})
unpacked = umsgpack.unpackb(packed)
print(packed)
print(unpacked)

with open('test.bin', 'wb') as f:
    umsgpack.pack({u'compact': True, u'schema': 0}, f)
    umsgpack.pack([1, 2, 3], f)

with open('test.bin', 'rb') as f:
    print(umsgpack.unpack(f))
    print(umsgpack.unpack(f))
Example #30
    def dump(self, obj, f):
        """ Dump object to file stream. """
        umsgpack.pack(obj, f, ext_handlers=self._ext_dump())
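
The symmetric loader passes the inverse handler table to unpack. A sketch, assuming a _ext_load method that maps ext type codes back to constructors:

    def load(self, f):
        """ Load object from file stream (hedged counterpart sketch). """
        return umsgpack.unpack(f, ext_handlers=self._ext_load())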
Example #31
    def pack_bloockchain(self, blockchain, outfile=None):
        if outfile is not None:
            umsgpack.pack([b.pack() for b in blockchain], outfile)
        else:
            return umsgpack.packb([b.pack() for b in blockchain])
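
An inverse would unpack the list and rebuild each block. This sketch assumes a Block.unpack classmethod mirroring b.pack():

    def unpack_blockchain(self, data=None, infile=None):
        # Hypothetical inverse of pack_bloockchain above.
        packed = umsgpack.unpack(infile) if infile is not None \
            else umsgpack.unpackb(data)
        return [Block.unpack(b) for b in packed]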