Code Example #1
File: IRCBot.py Project: erf-erf/psychic-happiness
    def on_pubmsg(self, serv, event):
        msg = event.arguments[0]
        if not msg:
            # an empty message would make msg[0] raise IndexError
            return
        if msg[0] == self.cmdprefix:
            # it is a command
            cmd, args = unpack(msg[1:])
            print(event.source.nick, 'launched the', cmd, 'command with the following arguments:', args)
            if cmd in fantasy.binding:
                print('command exists!')
                # call the bound function and relay any return value to the channel
                ret = fantasy.binding[cmd](serv, self, event, args)
                if ret is not None:
                    for line in ret.split('\n'):
                        # IRC lines are length-limited, so send long output in chunks
                        for chunk in split_len(line.encode(), 470):
                            serv.privmsg(event.target, chunk.decode())

        if msg[0] == self.adminprefix:
            # it is an admin command
            cmd, args = unpack(msg[1:])
            print(event.source.nick, 'launched the', cmd, 'admin command with the following arguments:', args)
            if cmd in admin.binding:
                print('command exists!')
                # call the bound function and relay any return value to the channel
                ret = admin.binding[cmd](serv, self, event, args)
                if ret is not None:
                    for line in ret.split('\n'):
                        for chunk in split_len(line.encode(), 470):
                            serv.privmsg(event.target, chunk.decode())
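The handler leans on two project helpers, unpack and split_len, that the excerpt does not show. A minimal sketch of what they might look like (hypothetical names and behavior, inferred from the call sites, not taken from the project):

def unpack(message):
    # Hypothetical: split "cmd arg1 arg2" into ("cmd", ["arg1", "arg2"]).
    cmd, _, rest = message.partition(' ')
    return cmd, rest.split()

def split_len(data, length):
    # Hypothetical: chunk a bytes payload so each privmsg stays under the
    # length limit used by the caller (470 bytes).
    return [data[i:i + length] for i in range(0, len(data), length)]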
Code Example #2
File: import_xml.py Project: bogolt/dclord
def processRawData(path):
	log.debug('processing raw data %s' % (path,))
	xml_dir = os.path.join(util.getTempDir(), config.options['data']['raw-xml-dir'])
	util.assureDirExist(xml_dir)
	base = os.path.basename(path)
	# drop the three-character archive suffix (presumably '.gz') to name the xml file
	xml_path = os.path.join(xml_dir, base[:-3])
	util.unpack(path, xml_path)
	return load_xml(xml_path)
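For context, base[:-3] strips a three-character suffix; assuming the raw files end in '.gz' (an assumption, not stated in the excerpt):

import os

base = os.path.basename('/tmp/raw/galaxy.xml.gz')  # hypothetical input path
assert base[:-3] == 'galaxy.xml'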
Code Example #3
def read_cache(stream):
    """Read a cache file from the given stream
    :return: tuple(version, entries_dict, extension_data, content_sha)
        * version is the integer version number
        * entries_dict maps a (path, stage) tuple to the corresponding
            IndexEntry instance
        * extension_data is '' or 4 bytes of type + 4 bytes of size + size bytes
        * content_sha is a 20 byte sha on all cache file contents"""
    version, num_entries = read_header(stream)
    count = 0
    entries = dict()

    read = stream.read
    tell = stream.tell
    while count < num_entries:
        beginoffset = tell()
        ctime = unpack(">8s", read(8))[0]
        mtime = unpack(">8s", read(8))[0]
        (dev, ino, mode, uid, gid, size, sha, flags) = \
            unpack(">LLLLLL20sH", read(20 + 4 * 6 + 2))
        path_size = flags & CE_NAMEMASK
        path = read(path_size)

        # entries are NUL-padded to an 8-byte boundary; skip the padding
        real_size = ((tell() - beginoffset + 8) & ~7)
        read((beginoffset + real_size) - tell())
        entry = IndexEntry(
            (mode, sha, flags, path, ctime, mtime, dev, ino, uid, gid, size))
        # entry_key would be the method to use, but we save the effort
        entries[(path, entry.stage)] = entry
        count += 1
    # END for each entry

    # the footer contains extension data and a sha on the content so far
    # Keep the extension footer, and verify we have a sha in the end
    # Extension data format is:
    # 4 bytes ID
    # 4 bytes length of chunk
    # repeated 0 - N times
    extension_data = stream.read(~0)  # ~0 == -1: read everything up to the end of the stream
    assert len(extension_data) > 19, \
        "Index footer was not at least a sha on content as it was only %i bytes in size" % len(extension_data)

    content_sha = extension_data[-20:]

    # truncate the sha in the end as we will dynamically create it anyway
    extension_data = extension_data[:-20]

    return (version, entries, extension_data, content_sha)
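The format string passed to unpack mirrors git's on-disk index entry layout: six 4-byte big-endian integers, a 20-byte SHA-1, and a 2-byte flags field, which is exactly the byte count requested from the stream. A quick self-check with the standard struct module:

from struct import calcsize

# six 4-byte fields + 20-byte sha + 2-byte flags == the 46 bytes read above
assert calcsize(">LLLLLL20sH") == 20 + 4 * 6 + 2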
Code Example #4
File: fun.py Project: vakaras/rmtoo-old
def read_entry(stream):
	"""Return: One entry of the given stream"""
	beginoffset = stream.tell()
	read = stream.read
	ctime = unpack(">8s", read(8))[0]
	mtime = unpack(">8s", read(8))[0]
	(dev, ino, mode, uid, gid, size, sha, flags) = \
		unpack(">LLLLLL20sH", read(20 + 4 * 6 + 2))
	path_size = flags & CE_NAMEMASK
	path = read(path_size)

	# entries are NUL-padded to an 8-byte boundary; skip the padding
	real_size = ((stream.tell() - beginoffset + 8) & ~7)
	read((beginoffset + real_size) - stream.tell())
	return IndexEntry((mode, sha, flags, path, ctime, mtime, dev, ino, uid, gid, size))
Code Example #5
def read_entry(stream):
    """Return: One entry of the given stream"""
    beginoffset = stream.tell()
    read = stream.read
    ctime = unpack(">8s", read(8))[0]
    mtime = unpack(">8s", read(8))[0]
    (dev, ino, mode, uid, gid, size, sha, flags) = \
        unpack(">LLLLLL20sH", read(20 + 4 * 6 + 2))
    path_size = flags & CE_NAMEMASK
    path = read(path_size)

    real_size = ((stream.tell() - beginoffset + 8) & ~7)
    data = read((beginoffset + real_size) - stream.tell())
    return IndexEntry(
        (mode, sha, flags, path, ctime, mtime, dev, ino, uid, gid, size))
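To exercise read_entry in isolation, one can synthesize a single aligned entry in memory. The stand-ins below (the CE_NAMEMASK value and a bare IndexEntry tuple subclass) are assumptions for the sketch, not the project's real definitions:

from io import BytesIO
from struct import pack, unpack

CE_NAMEMASK = 0x0FFF  # assumed: low 12 bits of flags hold the path length

class IndexEntry(tuple):
    pass

path = b"README.md"
raw = pack(">8s8sLLLLLL20sH",
           b"\0" * 8, b"\0" * 8,              # ctime, mtime
           0, 0, 0o100644, 0, 0, 0,           # dev, ino, mode, uid, gid, size
           b"\0" * 20,                        # sha
           len(path) & CE_NAMEMASK)           # flags carry the path length
raw += path
raw += b"\0" * (((len(raw) + 8) & ~7) - len(raw))  # NUL-pad to an 8-byte boundary

entry = read_entry(BytesIO(raw))
assert entry[3] == path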
Code Example #6
File: fun.py Project: schlosser/go-to-sleep
def read_cache(stream):
    """Read a cache file from the given stream
    :return: tuple(version, entries_dict, extension_data, content_sha)
        * version is the integer version number
        * entries_dict maps a (path, stage) tuple to the corresponding
            IndexEntry instance
        * extension_data is '' or 4 bytes of type + 4 bytes of size + size bytes
        * content_sha is a 20 byte sha on all cache file contents"""
    version, num_entries = read_header(stream)
    count = 0
    entries = dict()

    read = stream.read
    tell = stream.tell
    while count < num_entries:
        beginoffset = tell()
        ctime = unpack(">8s", read(8))[0]
        mtime = unpack(">8s", read(8))[0]
        (dev, ino, mode, uid, gid, size, sha, flags) = \
            unpack(">LLLLLL20sH", read(20 + 4 * 6 + 2))
        path_size = flags & CE_NAMEMASK
        path = read(path_size)

        real_size = ((tell() - beginoffset + 8) & ~7)
        data = read((beginoffset + real_size) - tell())
        entry = IndexEntry((mode, sha, flags, path, ctime, mtime, dev, ino, uid, gid, size))
        # entry_key would be the method to use, but we save the effort
        entries[(path, entry.stage)] = entry
        count += 1
    # END for each entry

    # the footer contains extension data and a sha on the content so far
    # Keep the extension footer, and verify we have a sha in the end
    # Extension data format is:
    # 4 bytes ID
    # 4 bytes length of chunk
    # repeated 0 - N times
    extension_data = stream.read(~0)  # ~0 == -1: read everything up to the end of the stream
    assert len(extension_data) > 19, \
        "Index footer was not at least a sha on content as it was only %i bytes in size" % len(extension_data)

    content_sha = extension_data[-20:]

    # truncate the sha in the end as we will dynamically create it anyway
    extension_data = extension_data[:-20]

    return (version, entries, extension_data, content_sha)
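As a usage sketch, the function can be pointed at the index of any local git repository, provided read_header, IndexEntry, CE_NAMEMASK, and struct's unpack are in scope as in the surrounding examples (the path is illustrative):

with open(".git/index", "rb") as stream:
    version, entries, extension_data, content_sha = read_cache(stream)
print("index v%d with %d entries" % (version, len(entries)))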
Code Example #7
    def _learn(self, idx):
        t_states, t_actions, t_qvals = unpack(self._memory,
                                              self._model,
                                              self._gamma,
                                              self._unfolding_steps,
                                              "cuda")
        self._optimizer.zero_grad()
        t_logits, t_values = self._model(t_states)
        t_log_probs = t_logits.log_softmax(dim=1)
        t_probs = t_logits.softmax(dim=1)

        # Compute the value loss
        value_loss = F.mse_loss(t_values.squeeze(-1), t_qvals)

        # Compute the policy loss
        t_advantages = t_qvals - t_values.detach()
        policy_loss = -(t_advantages * t_log_probs[range(self._batch_size), t_actions]).mean()

        # Compute the entropy and record the original probabilities for later
        entropy = -(t_probs * t_log_probs).sum(dim=1).mean()
        old_probs = t_probs
        (policy_loss + value_loss - self._beta * entropy).backward()
        nn_utils.clip_grad_norm_(self._model.parameters(), self._clip_grad)
        self._optimizer.step()

        # Compute KL divergence
        new_probs = self._model(t_states)[0].softmax(dim=1)
        kl_divergence = -((new_probs / old_probs).log() * old_probs).sum(dim=1).mean()
        self._memory.clear()

        # Plot
        self._plotter.add_scalar("Combined Loss", (policy_loss + value_loss - self._beta * entropy).item(), idx)
        self._plotter.add_scalar("Entropy", entropy.item(), idx)
        self._plotter.add_scalar("KL Divergence", kl_divergence.item(), idx)
Code Example #8
	def print_line(self, start_time=None):
		t = (self.timestamp - start_time) if start_time and self.timestamp else self.timestamp
		if not self.removed:
			coords = unpack(self.coords)
			coords = special_coords[coords] if coords in special_coords else "(%s, %s)" % coords
			return "found %s at %s. (%s)" % (self.pickup().name, coords, t)
		else:
			return "lost %s! (%s)" % (self.pickup().name, t)
Code Example #9
    def run(self):
        while self.flag and self._flag:
            try:
                # buffer sized comfortably above the 65,535-byte UDP datagram maximum
                data, _ = self.__socket.recvfrom(65565)
            except Exception as err:
                util.d(err)
            else:
                self.__catalogue.touch(util.unpack(data))
Code Example #10
File: core.py Project: jkmendix/m2ee-tools
    def unpack(self, mda_name):
        if util.unpack(self.config, mda_name):
            self.reload_config()
        else:
            return False

        post_unpack_hook = self.config.get_post_unpack_hook()
        if post_unpack_hook:
            util.run_post_unpack_hook(post_unpack_hook)
Code Example #11
def read_header(stream):
    """Return tuple(version_long, num_entries) from the given stream"""
    type_id = stream.read(4)
    if type_id != b"DIRC":  # the signature is raw bytes under Python 3
        raise AssertionError("Invalid index file header: %r" % type_id)
    version, num_entries = unpack(">LL", stream.read(4 * 2))

    # TODO: handle version 3: extended data, see read-cache.c
    assert version in (1, 2)
    return version, num_entries
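A quick smoke test with an in-memory stream (using the bytes signature from the Python 3 fix above):

from io import BytesIO
from struct import pack, unpack

header = pack(">4sLL", b"DIRC", 2, 0)  # signature, version 2, zero entries
assert read_header(BytesIO(header)) == (2, 0)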
Code Example #12
File: distributed_envs.py Project: Yar01av/Synapse
    def _learn(self, step_idx):
        # Extract the states, actions and values as tensors from the memory
        t_states, t_actions, t_qvals = unpack(self._memory,
                                              self._model,
                                              self._gamma,
                                              self._unfolding_steps,
                                              "cpu")
        # Clear the old gradients
        self._optimizer.zero_grad()

        # Convert the network output into actual probabilities and their logs for later use
        t_logits, t_values = self._model(t_states)
        t_log_probs = t_logits.log_softmax(dim=1)
        t_probs = t_logits.softmax(dim=1)

        # Compute the value loss
        value_loss = F.mse_loss(t_values.squeeze(-1), t_qvals)

        # Compute the policy loss
        t_advantages = t_qvals - t_values.detach()
        policy_loss = -(t_advantages * t_log_probs[range(self._batch_size), t_actions]).mean()

        # Compute the entropy and record the original probabilities for later
        entropy = -(t_probs * t_log_probs).sum(dim=1).mean()
        old_probs = t_probs

        # Put the complete expression for the loss together and backpropagate the gradients
        (policy_loss + value_loss - self._beta * entropy).backward()
        # nn_utils.clip_grad_norm_(self._model.parameters(), self._clip_grad)
        self._optimizer.step()

        # Compute KL divergence
        new_probs = self._model(t_states)[0].softmax(dim=1)
        kl_divergence = -((new_probs / old_probs).log() * old_probs).sum(dim=1).mean()

        # Empty the memory as the algorithm is on-policy and the policy has changed with the model
        self._memory.clear()

        # Plot
        self._plotter.add_scalar("Combined Loss", (policy_loss + value_loss - self._beta * entropy).item(), step_idx)
        self._plotter.add_scalar("Entropy", entropy.item(), step_idx)
        self._plotter.add_scalar("KL Divergence", kl_divergence.item(), step_idx)
Code Example #13
    def mtime(self):
        """See ctime property, but returns modification time"""
        return unpack(">LL", self[5])
Code Example #14
    def ctime(self):
        """
        :return:
            Tuple(int_time_seconds_since_epoch, int_nano_seconds) of the
            file's creation time"""
        return unpack(">LL", self[4])
Code Example #15
    def unpack(self, mda_name):
        util.unpack(self.config, mda_name)
        self.reload_config()
        post_unpack_hook = self.config.get_post_unpack_hook()
        if post_unpack_hook:
            util.run_post_unpack_hook(post_unpack_hook)
Code Example #16
    except IOError:
        exit("Unable to open " + filename + ".")
    if log_filename == "stdout":
        log_file = sys.stdout
    else:
        try:
            log_file = open(log_filename, 'w')
        except IOError:
            exit("Unable to open " + log_filename + ".")

    next_acknum = 0

    # Receive first packet
    packet, addr = recv_sock.recvfrom(576)
    source_port, dest_port, seqnum, acknum, header_length, \
        ack, final, window_size, contents = util.unpack(packet)

    checksum = util.get_checksum(packet)
    packet_valid = checksum == 0 and next_acknum == acknum

    if packet_valid:
        recv_file.write(contents)
        next_acknum += 1

    log = str(datetime.datetime.now()) + " " + str(source_port) + " " + str(
        dest_port) + " " + str(seqnum) + " " + str(acknum)
    log_file.write(log + "\n")

    # Establish ack socket connection
    ack_sock.connect((sender_ip, sender_port))
    out_port = ack_sock.getsockname()[1]
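util.unpack is project code the excerpt does not show; from the nine values the caller expects, a plausible shape would be the following (the field widths are guesses, not the project's actual wire format):

from struct import unpack as struct_unpack

def unpack(packet):
    # Hypothetical 18-byte header: two 2-byte ports, two 4-byte sequence
    # numbers, a 2-byte header length, one-byte ACK/FIN flags, 2-byte window.
    fields = struct_unpack(">HHLLHBBH", packet[:18])
    return fields + (packet[18:],)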
Code Example #17
File: sws.py Project: sabari003/duke
            estimated_rtt = recv_time - send_time
            dev_rtt = 0
            recv_sock.settimeout(timeout_time)
        except socket.timeout:
            retransmitted += 1
            continue

    while True:
        try:
            ack = recv_sock.recv(20)
            recv_time = time.time()

            # Unpack packet information
            ack_source_port, ack_dest_port, ack_seqnum,\
                ack_acknum, ack_header_length, ack_valid,\
                ack_final, ack_window_size, ack_contents = util.unpack(ack)

            log = str(datetime.datetime.now()) + " " + \
                  str(ack_source_port) + " " + \
                  str(ack_dest_port) + " " + \
                  str(ack_seqnum) + " " + \
                  str(ack_acknum) + "\n"

            # Log flags
            if ack_valid:
                log = log.strip("\n") + " ACK\n"
            if ack_final:
                log = log.strip("\n") + " FIN\n"

            # If valid, here we go!
            if ack_acknum == acknum and ack_valid: