Example #1
    def on_load(self):
        self.sector_manager = Sector_Manager(SECTORS)
        self.route_manager = Route_Manager(ROUTES,
                                           test_routes=VISUALIZE,
                                           draw_paths=VISUALIZE)
        self.traffic_manager = Traffic_Manager(
            max_ac=MAX_AC,
            times=TIME_SEP,
            max_spd=CONSTRAINTS["cas"]["max"],
            min_spd=CONSTRAINTS["cas"]["min"],
            max_alt=32000,
            min_alt=32000,
            network=self.route_manager)

        self.memory = Memory()

        self.agent = Agent(state_size=STATE_SHAPE,
                           action_size=ACTION_SHAPE,
                           value_size=VALUE_SHAPE)

        # Prefer the best checkpoint, fall back to the latest one, otherwise
        # start from scratch.
        try:
            self.agent.load(path=FILE + "best.h5")
        except Exception:
            try:
                self.agent.load(path=FILE + ".h5")
            except Exception:
                pass

        self.initialized = True

        print("ATC: READY")
        string = "=================================\n   UPDATE: RUNNING EPOCH {}\n=================================\n".format(
            self.format_epoch())
        self.print_all(string)
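
The nested try/except above implements a checkpoint-fallback policy: prefer the best-scoring weights, fall back to the most recent ones, otherwise start untrained. A minimal standalone sketch of the same policy (the file names come from the example; `load_first_available` is an illustrative helper, not part of the project):

import os

def load_first_available(agent, candidates):
    """Try each checkpoint path in order; return the path that loaded, or None."""
    for path in candidates:
        if os.path.exists(path):
            agent.load(path=path)
            return path
    return None  # no checkpoint found; the agent keeps its initial weights

# e.g. load_first_available(self.agent, [FILE + "best.h5", FILE + ".h5"])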
Example #2
def main():
    # Read the command-line arguments
    args_length = len(sys.argv) - 1
    args = sys.argv[1:]

    if args_length != 2:
        print_args_error()
        return 0
    
    process_filename = args[0]
    files_filename = args[1]

    # Initialize the subsystems
    global GLOBAL_clock
    global GLOBAL_processes
    GLOBAL_processes = load_processes(process_filename, files_filename)
    processes = processes_by_init_time(GLOBAL_processes)
    memory = Memory()

    # File system: initialize it and load the file data
    filesys = FileSystem(files_filename)

    # Resources
    resources = ResourceManager()

    # Scheduler
    scheduler = Scheduler(resources, memory)

    process_running = None
    print(GLOBAL_clock)
    # Advance the clock until the global process list is empty
    while GLOBAL_processes:
        # Timer (clock): returns the processes arriving at this tick
        proc_list = process_launcher(GLOBAL_clock, processes)

        # Update the scheduler: check blocked processes and apply aging
        scheduler.update(process_running)
        # Package the arriving processes and send them to the scheduler
        scheduler.send_ready_process(proc_list)
        # The scheduler returns the process to execute
        # (signalling whether the running process should be swapped or not)
        process_running = scheduler.get_process_to_execute(process_running)
        # Dispatcher
        dispatch(process_running, memory, filesys, resources)

        GLOBAL_clock += 1
    print(filesys)
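
The main loop is a classic discrete-clock simulation: launch the processes that arrive at this tick, update the scheduler, pick a process, dispatch it, advance the clock. A self-contained toy version of that shape with a FIFO policy (all names here are illustrative, not the example's API):

from collections import deque

def simulate(arrivals):
    """arrivals maps tick -> list of process names (toy input)."""
    clock, ready, pending = 0, deque(), sum(len(v) for v in arrivals.values())
    while pending or ready:
        batch = arrivals.get(clock, [])
        ready.extend(batch)              # "process_launcher" step
        pending -= len(batch)
        if ready:
            running = ready.popleft()    # FIFO "get_process_to_execute"
            print(f"t={clock}: dispatch {running}")  # "dispatch" step
        clock += 1

simulate({0: ["A"], 2: ["B", "C"]})      # runs A at t=0, B at t=2, C at t=3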
Example #3
    def _init_memory(self):
        """
        Initialize memory class when it's needed by pymem.
        """

        self.memory = Memory()
Example #4
class Pymem(object):
    """Provides class instance methods for general process and memory
    manipulations.
    """
    def __init__(self):
        """
        Initialize pymem objects
        """

        self.process = None
        self.memory = None
        self.module = None
        self.pid = None  # refactor
        self.process32 = None  # refactor
        self.process_handle = None

    def _init_process(self):
        """
        Initialize process class when it's needed by pymem.
        """

        self.process = Process()

    def _init_memory(self):
        """
        Initialize memory class when it's needed by pymem.
        """

        self.memory = Memory()

    def _init_module(self):
        """
        Initialize module class when it's needed by pymem.
        """

        self.module = Module()

    @is_init('process')
    def open_process(self, process_id, debug=True):
        """
        Opens a process for interaction.
        """

        if debug:
            if self.process.open_debug(process_id):
                self.process_handle = self.process.h_process
                self.pid = process_id
                self.process32 = self.process.process32
                return True
            return False
        return self.process.open(process_id)

    @is_init('process')
    def open_process_from_name(self, process_name, debug=True, number=0):
        """
        Opens a process from its name for interaction.
        """

        processes = process_from_name(process_name)
        if processes is not None:
            # Pick the `number`-th matching process (0 by default).
            if len(processes) > number:
                process = processes[number]
                return self.open_process(process.th32ProcessID, debug)
        return False

    @is_init('memory')
    def read_offset(self, address, selected_type):
        """
        Read memory from a process.
        If the type <T> is supported, this method performs the required
        call to read from the process. If the type <T> is not supported
        or the process is not open, the method will raise an Exception.

        Supported types: float, int, uint, long, ulong, int64, uint64, byte
        """

        if self.process.is_process_open:
            if not self.memory.is_process_set:
                self.memory.set_process(self.process.h_process)
            return self.memory.read_offset(address, selected_type)
        return False

    @is_init('module')
    def list_module32(self):
        """
        Return module (MODULEENTRY32) loaded by current process.
        """

        if self.process32 is not None:
            if not self.module.is_process_set:
                self.module.set_process32(self.process32)
            return self.module.list_module32()
        return []

    @is_init('module')
    def has_module32(self, module):
        """
        Return True if the current process has loaded the given module (DLL).
        """

        if self.process32 is not None:
            if not self.module.is_process_set:
                self.module.set_process32(self.process32)
            return self.module.has_module32(module)
        return False
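
The `@is_init('process')` / `@is_init('memory')` / `@is_init('module')` decorators above presumably call the matching `_init_*` method before the wrapped call runs, so each helper is built lazily on first use. A sketch of what such a decorator could look like (an assumption inferred from the usage here, not pymem's actual source):

import functools

def is_init(name):
    """Ensure self.<name> exists (via self._init_<name>()) before the call runs."""
    def decorator(method):
        @functools.wraps(method)
        def wrapper(self, *args, **kwargs):
            if getattr(self, name, None) is None:
                getattr(self, "_init_" + name)()  # e.g. self._init_memory()
            return method(self, *args, **kwargs)
        return wrapper
    return decorator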
Example #5
    def __init__(self,
                 neighbor_finder,
                 node_features,
                 edge_features,
                 device,
                 dropout=0.1,
                 memory_update_at_start=True,
                 message_dimension=100,
                 memory_dimension=200,
                 n_neighbors=None,
                 aggregator_type="last",
                 mean_time_shift_src=0,
                 std_time_shift_src=1,
                 mean_time_shift_dst=0,
                 std_time_shift_dst=1,
                 threshold=2):
        super(DGNN, self).__init__()
        self.neighbor_finder = neighbor_finder
        self.device = device
        self.logger = logging.getLogger(__name__)

        self.node_raw_features = torch.from_numpy(
            node_features.astype(np.float32)).to(device)
        self.edge_raw_features = torch.from_numpy(
            edge_features.astype(np.float32)).to(device)

        self.n_node_features = self.node_raw_features.shape[1]
        self.n_nodes = self.node_raw_features.shape[0]
        self.n_edge_features = self.edge_raw_features.shape[1]
        self.embedding_dimension = self.n_node_features
        self.n_neighbors = n_neighbors
        self.memory_s = None
        self.memory_g = None

        self.threshold = threshold
        self.mean_time_shift_src = mean_time_shift_src
        self.std_time_shift_src = std_time_shift_src
        self.mean_time_shift_dst = mean_time_shift_dst
        self.std_time_shift_dst = std_time_shift_dst
        self.memory_dimension = memory_dimension
        self.memory_update_at_start = memory_update_at_start
        self.message_dimension = message_dimension
        self.memory_merge = MemoryMerge(self.memory_dimension, self.device)
        self.memory_s = Memory(n_nodes=self.n_nodes,
                               memory_dimension=self.memory_dimension,
                               message_dimension=message_dimension,
                               device=device)
        self.memory_g = Memory(n_nodes=self.n_nodes,
                               memory_dimension=self.memory_dimension,
                               message_dimension=message_dimension,
                               device=device)
        self.message_dim = message_dimension
        self.message_aggregator = get_message_aggregator(
            aggregator_type=aggregator_type, device=device)
        self.message_function = MessageFunction(
            memory_dimension=memory_dimension,
            message_dimension=message_dimension,
            edge_dimension=self.n_edge_features,
            device=self.device)
        self.memory_updater_s = MemoryUpdater(
            memory=self.memory_s,
            message_dimension=message_dimension,
            memory_dimension=self.memory_dimension,
            mean_time_shift_src=self.mean_time_shift_src / 2,
            device=self.device)
        self.memory_updater_g = MemoryUpdater(
            memory=self.memory_g,
            message_dimension=message_dimension,
            memory_dimension=self.memory_dimension,
            mean_time_shift_src=self.mean_time_shift_dst / 2,
            device=self.device)
        self.propagater_s = Propagater(
            memory=self.memory_s,
            message_dimension=message_dimension,
            memory_dimension=self.memory_dimension,
            mean_time_shift_src=self.mean_time_shift_src / 2,
            neighbor_finder=self.neighbor_finder,
            n_neighbors=self.n_neighbors,
            tau=self.threshold,
            device=self.device)
        self.propagater_g = Propagater(
            memory=self.memory_g,
            message_dimension=message_dimension,
            memory_dimension=self.memory_dimension,
            mean_time_shift_src=self.mean_time_shift_dst / 2,
            neighbor_finder=self.neighbor_finder,
            n_neighbors=self.n_neighbors,
            tau=self.threshold,
            device=self.device)
        self.W_s = nn.Parameter(
            torch.empty(memory_dimension,
                        memory_dimension // 2).to(self.device))
        self.W_g = nn.Parameter(
            torch.empty(memory_dimension,
                        memory_dimension // 2).to(self.device))
        # Zero init would stall training here (both gradients of the
        # dot-product score vanish), so apply the Xavier init hinted at by
        # the original "#nn.xavier_" note.
        nn.init.xavier_uniform_(self.W_s)
        nn.init.xavier_uniform_(self.W_g)
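
The `mean_time_shift_*` / `std_time_shift_*` arguments are normalization statistics for the time elapsed since a node's last update. One plausible way to precompute them from an interaction stream, matching how the constructor consumes them (a sketch under that assumption, not this project's actual data pipeline):

import numpy as np

def time_shift_stats(sources, timestamps):
    """Mean/std of per-node inter-event time deltas for an interaction stream."""
    last_seen, deltas = {}, []
    for src, ts in zip(sources, timestamps):
        deltas.append(ts - last_seen.get(src, ts))  # 0 for a node's first event
        last_seen[src] = ts
    return float(np.mean(deltas)), float(np.std(deltas))

# e.g. mean_time_shift_src, std_time_shift_src = time_shift_stats(src_ids, ts)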
Example #6
class DGNN(nn.Module):
    def __init__(self,
                 neighbor_finder,
                 node_features,
                 edge_features,
                 device,
                 dropout=0.1,
                 memory_update_at_start=True,
                 message_dimension=100,
                 memory_dimension=200,
                 n_neighbors=None,
                 aggregator_type="last",
                 mean_time_shift_src=0,
                 std_time_shift_src=1,
                 mean_time_shift_dst=0,
                 std_time_shift_dst=1,
                 threshold=2):
        super(DGNN, self).__init__()
        self.neighbor_finder = neighbor_finder
        self.device = device
        self.logger = logging.getLogger(__name__)

        self.node_raw_features = torch.from_numpy(
            node_features.astype(np.float32)).to(device)
        self.edge_raw_features = torch.from_numpy(
            edge_features.astype(np.float32)).to(device)

        self.n_node_features = self.node_raw_features.shape[1]
        self.n_nodes = self.node_raw_features.shape[0]
        self.n_edge_features = self.edge_raw_features.shape[1]
        self.embedding_dimension = self.n_node_features
        self.n_neighbors = n_neighbors
        self.memory_s = None
        self.memory_g = None

        self.threshold = threshold
        self.mean_time_shift_src = mean_time_shift_src
        self.std_time_shift_src = std_time_shift_src
        self.mean_time_shift_dst = mean_time_shift_dst
        self.std_time_shift_dst = std_time_shift_dst
        self.memory_dimension = memory_dimension
        self.memory_update_at_start = memory_update_at_start
        self.message_dimension = message_dimension
        self.memory_merge = MemoryMerge(self.memory_dimension, self.device)
        self.memory_s = Memory(n_nodes=self.n_nodes,
                               memory_dimension=self.memory_dimension,
                               message_dimension=message_dimension,
                               device=device)
        self.memory_g = Memory(n_nodes=self.n_nodes,
                               memory_dimension=self.memory_dimension,
                               message_dimension=message_dimension,
                               device=device)
        self.message_dim = message_dimension
        self.message_aggregator = get_message_aggregator(
            aggregator_type=aggregator_type, device=device)
        self.message_function = MessageFunction(
            memory_dimension=memory_dimension,
            message_dimension=message_dimension,
            edge_dimension=self.n_edge_features,
            device=self.device)
        self.memory_updater_s = MemoryUpdater(
            memory=self.memory_s,
            message_dimension=message_dimension,
            memory_dimension=self.memory_dimension,
            mean_time_shift_src=self.mean_time_shift_src / 2,
            device=self.device)
        self.memory_updater_g = MemoryUpdater(
            memory=self.memory_g,
            message_dimension=message_dimension,
            memory_dimension=self.memory_dimension,
            mean_time_shift_src=self.mean_time_shift_dst / 2,
            device=self.device)
        self.propagater_s = Propagater(
            memory=self.memory_s,
            message_dimension=message_dimension,
            memory_dimension=self.memory_dimension,
            mean_time_shift_src=self.mean_time_shift_src / 2,
            neighbor_finder=self.neighbor_finder,
            n_neighbors=self.n_neighbors,
            tau=self.threshold,
            device=self.device)
        self.propagater_g = Propagater(
            memory=self.memory_g,
            message_dimension=message_dimension,
            memory_dimension=self.memory_dimension,
            mean_time_shift_src=self.mean_time_shift_dst / 2,
            neighbor_finder=self.neighbor_finder,
            n_neighbors=self.n_neighbors,
            tau=self.threshold,
            device=self.device)
        self.W_s = nn.Parameter(
            torch.empty(memory_dimension,
                        memory_dimension // 2).to(self.device))
        self.W_g = nn.Parameter(
            torch.empty(memory_dimension,
                        memory_dimension // 2).to(self.device))
        # Zero init would stall training here (both gradients of the
        # dot-product score vanish), so apply the Xavier init hinted at by
        # the original "#nn.xavier_" note.
        nn.init.xavier_uniform_(self.W_s)
        nn.init.xavier_uniform_(self.W_g)

    def update_memory(self, source_nodes, destination_nodes, messages_s,
                      messages_g):
        # Aggregate messages for the same nodes

        unique_src_nodes, unique_src_messages, unique_src_timestamps = self.message_aggregator.aggregate(
            source_nodes, messages_s)
        unique_des_nodes, unique_des_messages, unique_des_timestamps = self.message_aggregator.aggregate(
            destination_nodes, messages_g)

        # Update the memory with the aggregated messages
        self.memory_updater_s.update_memory(unique_src_nodes,
                                            unique_src_messages,
                                            timestamps=unique_src_timestamps)
        self.memory_updater_g.update_memory(unique_des_nodes,
                                            unique_des_messages,
                                            timestamps=unique_des_timestamps)

    def propagate(self, source_nodes, destination_nodes, messages_s,
                  messages_g):
        unique_src_nodes, unique_src_messages, unique_src_timestamps = self.message_aggregator.aggregate(
            source_nodes, messages_s)
        unique_des_nodes, unique_des_messages, unique_des_timestamps = self.message_aggregator.aggregate(
            destination_nodes, messages_g)

        self.propagater_s(self.memory_s.memory,
                          unique_src_nodes,
                          unique_src_messages,
                          timestamps=unique_src_timestamps)
        self.propagater_g(self.memory_g.memory,
                          unique_des_nodes,
                          unique_des_messages,
                          timestamps=unique_des_timestamps)

    def compute_loss(self, memory_s, memory_g, source_nodes,
                     destination_nodes):
        # Despite its name, this returns edge probabilities (the sigmoid of a
        # dot-product score); the training loss itself is computed by the caller.
        source_mem = self.memory_merge(memory_s[1][source_nodes],
                                       memory_g[1][source_nodes])
        destination_mem = self.memory_merge(memory_s[1][destination_nodes],
                                            memory_g[1][destination_nodes])
        source_emb = torch.matmul(source_mem, self.W_s)
        destination_emb = torch.matmul(destination_mem, self.W_g)
        score = torch.sum(source_emb * destination_emb, dim=1)
        return score.sigmoid()

    def forward(self,
                source_nodes,
                destination_nodes,
                negative_nodes,
                edge_times,
                edge_idxs,
                test=False):
        positives = np.concatenate([source_nodes, destination_nodes])
        memory_s, last_update_s, memory_g, last_update_g = \
            self.get_updated_memory(list(range(self.n_nodes)), list(range(self.n_nodes)),
                                    self.memory_s.messages, self.memory_g.messages)

        pos_score = self.compute_loss(memory_s, memory_g, source_nodes,
                                      destination_nodes)
        neg_score = self.compute_loss(memory_s, memory_g, source_nodes,
                                      negative_nodes)
        self.update_memory(source_nodes, destination_nodes,
                           self.memory_s.messages, self.memory_g.messages)
        self.propagate(source_nodes, destination_nodes, self.memory_s.messages,
                       self.memory_g.messages)
        self.memory_s.clear_messages(positives)
        self.memory_g.clear_messages(positives)
        unique_sources, source_id_to_messages = self.get_messages(
            source_nodes, destination_nodes, edge_times, edge_idxs)
        unique_destinations, destination_id_to_messages = self.get_messages(
            destination_nodes, source_nodes, edge_times, edge_idxs)
        self.memory_s.store_raw_messages(unique_sources, source_id_to_messages)
        self.memory_g.store_raw_messages(unique_destinations,
                                         destination_id_to_messages)
        if not test:
            return pos_score, neg_score
        else:
            source_mem = self.memory_merge(memory_s[1][source_nodes],
                                           memory_g[1][source_nodes])
            destination_mem = self.memory_merge(memory_s[1][destination_nodes],
                                                memory_g[1][destination_nodes])
            return source_mem, destination_mem

    def get_messages(self, source_nodes, destination_nodes, edge_times,
                     edge_idxs):
        edge_times = torch.from_numpy(edge_times).float().to(self.device)
        edge_features = self.edge_raw_features[edge_idxs]
        source_memory = self.memory_merge(
            self.memory_s.memory[1][source_nodes],
            self.memory_g.memory[1][source_nodes])
        destination_memory = self.memory_merge(
            self.memory_s.memory[1][destination_nodes],
            self.memory_g.memory[1][destination_nodes])

        source_message = self.message_function.compute_message(
            source_memory, destination_memory, edge_features)
        messages = defaultdict(list)
        unique_sources = np.unique(source_nodes)

        for i in range(len(source_nodes)):
            messages[source_nodes[i]].append(
                (source_message[i], edge_times[i]))

        return unique_sources, messages

    def get_updated_memory(self, source_nodes, destination_nodes, message_s,
                           message_g):
        unique_src_nodes, unique_src_messages, unique_src_timestamps = self.message_aggregator.aggregate(
            source_nodes, message_s)
        unique_des_nodes, unique_des_messages, unique_des_timestamps = self.message_aggregator.aggregate(
            destination_nodes, message_g)
        updated_src_memory, updated_src_last_update = self.memory_updater_s.update_memory(
            unique_src_nodes,
            unique_src_messages,
            timestamps=unique_src_timestamps,
            inplace=False)
        updated_des_memory, updated_des_last_update = self.memory_updater_g.update_memory(
            unique_des_nodes,
            unique_des_messages,
            timestamps=unique_des_timestamps,
            inplace=False)
        updated_src_memory = self.propagater_s(
            updated_src_memory,
            unique_src_nodes,
            unique_src_messages,
            timestamps=unique_src_timestamps,
            inplace=False)
        updated_des_memory = self.propagater_g(
            updated_des_memory,
            unique_des_nodes,
            unique_des_messages,
            timestamps=unique_des_timestamps,
            inplace=False)

        return updated_src_memory, updated_src_last_update, updated_des_memory, updated_des_last_update

    def set_neighbor_finder(self, neighbor_finder):
        self.neighbor_finder = neighbor_finder
        self.propagater_s.neighbor_finder = neighbor_finder
        self.propagater_g.neighbor_finder = neighbor_finder
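
`compute_loss` above is a low-rank bilinear decoder: with merged memories m_u, m_v of dimension d and projections W_s, W_g of shape (d, d/2), the edge probability is sigmoid(<m_u W_s, m_v W_g>). A tiny shape check of that scoring rule (illustrative values only):

import torch

d = 8                                    # stand-in for memory_dimension
W_s, W_g = torch.randn(d, d // 2), torch.randn(d, d // 2)
m_u, m_v = torch.randn(3, d), torch.randn(3, d)   # batch of 3 merged memories
score = torch.sum((m_u @ W_s) * (m_v @ W_g), dim=1).sigmoid()
print(score.shape)                       # torch.Size([3]): one probability per pair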
Example #7
    def __init__(self,
                 neighbor_finder,
                 node_features,
                 edge_features,
                 device,
                 n_layers=2,
                 n_heads=2,
                 dropout=0.1,
                 use_memory=False,
                 memory_update_at_start=True,
                 message_dimension=100,
                 memory_dimension=500,
                 embedding_module_type="graph_attention",
                 message_function="mlp",
                 mean_time_shift_src=0,
                 std_time_shift_src=1,
                 mean_time_shift_dst=0,
                 std_time_shift_dst=1,
                 n_neighbors=None,
                 aggregator_type="last",
                 memory_updater_type="gru",
                 use_destination_embedding_in_message=False,
                 use_source_embedding_in_message=False,
                 dyrep=False):
        super(TGN, self).__init__()

        self.n_layers = n_layers
        self.neighbor_finder = neighbor_finder
        self.device = device
        self.logger = logging.getLogger(__name__)

        self.node_raw_features = torch.from_numpy(
            node_features.astype(np.float32)).to(device)
        self.edge_raw_features = torch.from_numpy(
            edge_features.astype(np.float32)).to(device)
        self.n_node_features = self.node_raw_features.shape[1]
        self.n_nodes = self.node_raw_features.shape[0]
        self.n_edge_features = self.edge_raw_features.shape[1]
        self.embedding_dimension = self.n_node_features
        self.n_neighbors = n_neighbors
        self.embedding_module_type = embedding_module_type
        self.use_destination_embedding_in_message = use_destination_embedding_in_message
        self.use_source_embedding_in_message = use_source_embedding_in_message
        self.dyrep = dyrep

        self.use_memory = use_memory
        self.time_encoder = TimeEncode(dimension=self.n_node_features)
        self.memory = None

        self.mean_time_shift_src = mean_time_shift_src
        self.std_time_shift_src = std_time_shift_src
        self.mean_time_shift_dst = mean_time_shift_dst
        self.std_time_shift_dst = std_time_shift_dst

        if self.use_memory:
            self.memory_dimension = memory_dimension
            self.memory_update_at_start = memory_update_at_start
            raw_message_dimension = 2 * self.memory_dimension + self.n_edge_features + \
                                    self.time_encoder.dimension
            message_dimension = message_dimension if message_function != "identity" else raw_message_dimension
            self.memory = Memory(n_nodes=self.n_nodes,
                                 memory_dimension=self.memory_dimension,
                                 input_dimension=message_dimension,
                                 message_dimension=message_dimension,
                                 device=device)
            self.message_aggregator = get_message_aggregator(
                aggregator_type=aggregator_type, device=device)
            self.message_function = get_message_function(
                module_type=message_function,
                raw_message_dimension=raw_message_dimension,
                message_dimension=message_dimension)
            self.memory_updater = get_memory_updater(
                module_type=memory_updater_type,
                memory=self.memory,
                message_dimension=message_dimension,
                memory_dimension=self.memory_dimension,
                device=device)

        self.embedding_module_type = embedding_module_type

        self.embedding_module = get_embedding_module(
            module_type=embedding_module_type,
            node_features=self.node_raw_features,
            edge_features=self.edge_raw_features,
            memory=self.memory,
            neighbor_finder=self.neighbor_finder,
            time_encoder=self.time_encoder,
            n_layers=self.n_layers,
            n_node_features=self.n_node_features,
            n_edge_features=self.n_edge_features,
            n_time_features=self.n_node_features,
            embedding_dimension=self.embedding_dimension,
            device=self.device,
            n_heads=n_heads,
            dropout=dropout,
            use_memory=use_memory,
            n_neighbors=self.n_neighbors)

        # MLP to compute probability on an edge given two node embeddings
        self.affinity_score = MergeLayer(self.n_node_features,
                                         self.n_node_features,
                                         self.n_node_features, 1)
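
A quick arithmetic check of the message sizing above: a raw message is the concatenation [memory_src || memory_dst || edge_features || time_encoding], so raw_message_dimension = 2 * memory_dimension + n_edge_features + time_encoder.dimension. With the default here (memory_dimension=500) and, say, 172-dim edge features and a 172-dim time encoding (illustrative sizes, not fixed by this snippet), that is 2*500 + 172 + 172 = 1344. With message_function="identity" the message keeps that full size, which is why message_dimension is overridden; with the default "mlp" it is compressed to message_dimension=100.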
Example #8
class TGN(torch.nn.Module):
    def __init__(self,
                 neighbor_finder,
                 node_features,
                 edge_features,
                 device,
                 n_layers=2,
                 n_heads=2,
                 dropout=0.1,
                 use_memory=False,
                 memory_update_at_start=True,
                 message_dimension=100,
                 memory_dimension=500,
                 embedding_module_type="graph_attention",
                 message_function="mlp",
                 mean_time_shift_src=0,
                 std_time_shift_src=1,
                 mean_time_shift_dst=0,
                 std_time_shift_dst=1,
                 n_neighbors=None,
                 aggregator_type="last",
                 memory_updater_type="gru",
                 use_destination_embedding_in_message=False,
                 use_source_embedding_in_message=False,
                 dyrep=False):
        super(TGN, self).__init__()

        self.n_layers = n_layers
        self.neighbor_finder = neighbor_finder
        self.device = device
        self.logger = logging.getLogger(__name__)

        self.node_raw_features = torch.from_numpy(
            node_features.astype(np.float32)).to(device)
        self.edge_raw_features = torch.from_numpy(
            edge_features.astype(np.float32)).to(device)
        self.n_node_features = self.node_raw_features.shape[1]
        self.n_nodes = self.node_raw_features.shape[0]
        self.n_edge_features = self.edge_raw_features.shape[1]
        self.embedding_dimension = self.n_node_features
        self.n_neighbors = n_neighbors
        self.embedding_module_type = embedding_module_type
        self.use_destination_embedding_in_message = use_destination_embedding_in_message
        self.use_source_embedding_in_message = use_source_embedding_in_message
        self.dyrep = dyrep

        self.use_memory = use_memory
        self.time_encoder = TimeEncode(dimension=self.n_node_features)
        self.memory = None

        self.mean_time_shift_src = mean_time_shift_src
        self.std_time_shift_src = std_time_shift_src
        self.mean_time_shift_dst = mean_time_shift_dst
        self.std_time_shift_dst = std_time_shift_dst

        if self.use_memory:
            self.memory_dimension = memory_dimension
            self.memory_update_at_start = memory_update_at_start
            raw_message_dimension = 2 * self.memory_dimension + self.n_edge_features + \
                                    self.time_encoder.dimension
            message_dimension = message_dimension if message_function != "identity" else raw_message_dimension
            self.memory = Memory(n_nodes=self.n_nodes,
                                 memory_dimension=self.memory_dimension,
                                 input_dimension=message_dimension,
                                 message_dimension=message_dimension,
                                 device=device)
            self.message_aggregator = get_message_aggregator(
                aggregator_type=aggregator_type, device=device)
            self.message_function = get_message_function(
                module_type=message_function,
                raw_message_dimension=raw_message_dimension,
                message_dimension=message_dimension)
            self.memory_updater = get_memory_updater(
                module_type=memory_updater_type,
                memory=self.memory,
                message_dimension=message_dimension,
                memory_dimension=self.memory_dimension,
                device=device)

        self.embedding_module_type = embedding_module_type

        self.embedding_module = get_embedding_module(
            module_type=embedding_module_type,
            node_features=self.node_raw_features,
            edge_features=self.edge_raw_features,
            memory=self.memory,
            neighbor_finder=self.neighbor_finder,
            time_encoder=self.time_encoder,
            n_layers=self.n_layers,
            n_node_features=self.n_node_features,
            n_edge_features=self.n_edge_features,
            n_time_features=self.n_node_features,
            embedding_dimension=self.embedding_dimension,
            device=self.device,
            n_heads=n_heads,
            dropout=dropout,
            use_memory=use_memory,
            n_neighbors=self.n_neighbors)

        # MLP to compute probability on an edge given two node embeddings
        self.affinity_score = MergeLayer(self.n_node_features,
                                         self.n_node_features,
                                         self.n_node_features, 1)

    def compute_temporal_embeddings(self,
                                    source_nodes,
                                    destination_nodes,
                                    negative_nodes,
                                    edge_times,
                                    edge_idxs,
                                    n_neighbors=20):
        """
    Compute temporal embeddings for sources, destinations, and negatively sampled destinations.

    source_nodes [batch_size]: source ids.
    :param destination_nodes [batch_size]: destination ids
    :param negative_nodes [batch_size]: ids of negative sampled destination
    :param edge_times [batch_size]: timestamp of interaction
    :param edge_idxs [batch_size]: index of interaction
    :param n_neighbors [scalar]: number of temporal neighbor to consider in each convolutional
    layer
    :return: Temporal embeddings for sources, destinations and negatives
    """

        n_samples = len(source_nodes)
        nodes = np.concatenate(
            [source_nodes, destination_nodes, negative_nodes])
        positives = np.concatenate([source_nodes, destination_nodes])
        timestamps = np.concatenate([edge_times, edge_times, edge_times])

        memory = None
        time_diffs = None

        # get memory from before the current timestep
        if self.use_memory:
            if self.memory_update_at_start:
                # Update memory for all nodes with messages stored in previous batches
                memory, last_update = self.get_updated_memory(
                    list(range(self.n_nodes)), self.memory.messages)
            else:
                memory = self.memory.get_memory(list(range(self.n_nodes)))
                last_update = self.memory.last_update

            # Compute differences between the time the memory of a node was last updated,
            # and the time for which we want to compute the embedding of a node
            source_time_diffs = torch.LongTensor(edge_times).to(
                self.device) - last_update[source_nodes].long()
            source_time_diffs = (source_time_diffs - self.mean_time_shift_src
                                 ) / self.std_time_shift_src
            destination_time_diffs = torch.LongTensor(edge_times).to(
                self.device) - last_update[destination_nodes].long()
            destination_time_diffs = (
                destination_time_diffs -
                self.mean_time_shift_dst) / self.std_time_shift_dst
            negative_time_diffs = torch.LongTensor(edge_times).to(
                self.device) - last_update[negative_nodes].long()
            negative_time_diffs = (
                negative_time_diffs -
                self.mean_time_shift_dst) / self.std_time_shift_dst

            time_diffs = torch.cat([
                source_time_diffs, destination_time_diffs, negative_time_diffs
            ],
                                   dim=0)

        # TODO: where are the node / edge features? Shouldn't they be inputs to compute_embedding?
        # Compute the embeddings using the embedding module (combines memory + current batch of interactions).
        node_embedding = self.embedding_module.compute_embedding(
            memory=memory,
            source_nodes=nodes,
            timestamps=timestamps,
            n_layers=self.n_layers,
            n_neighbors=n_neighbors,
            time_diffs=time_diffs)

        source_node_embedding = node_embedding[:n_samples]
        destination_node_embedding = node_embedding[n_samples:2 * n_samples]
        negative_node_embedding = node_embedding[2 * n_samples:]

        # update memory with information from current batch
        if self.use_memory:
            if self.memory_update_at_start:
                # Persist the updates to the memory only for sources and destinations (since now we have
                # new messages for them)
                self.update_memory(positives, self.memory.messages)

                assert torch.allclose(memory[positives], self.memory.get_memory(positives), atol=1e-5), \
                  "Something wrong in how the memory was updated"

                # Remove messages for the positives since we have already updated the memory using them
                self.memory.clear_messages(positives)

            unique_sources, source_id_to_messages = self.get_raw_messages(
                source_nodes, source_node_embedding, destination_nodes,
                destination_node_embedding, edge_times, edge_idxs)
            unique_destinations, destination_id_to_messages = self.get_raw_messages(
                destination_nodes, destination_node_embedding, source_nodes,
                source_node_embedding, edge_times, edge_idxs)
            if self.memory_update_at_start:
                self.memory.store_raw_messages(unique_sources,
                                               source_id_to_messages)
                self.memory.store_raw_messages(unique_destinations,
                                               destination_id_to_messages)
            else:
                self.update_memory(unique_sources, source_id_to_messages)
                self.update_memory(unique_destinations,
                                   destination_id_to_messages)

            if self.dyrep:
                source_node_embedding = memory[source_nodes]
                destination_node_embedding = memory[destination_nodes]
                negative_node_embedding = memory[negative_nodes]

        return source_node_embedding, destination_node_embedding, negative_node_embedding

    def compute_edge_probabilities(self,
                                   source_nodes,
                                   destination_nodes,
                                   negative_nodes,
                                   edge_times,
                                   edge_idxs,
                                   n_neighbors=20):
        """
    Compute probabilities for edges between sources and destination and between sources and
    negatives by first computing temporal embeddings using the TGN encoder and then feeding them
    into the MLP decoder.
    :param destination_nodes [batch_size]: destination ids
    :param negative_nodes [batch_size]: ids of negative sampled destination
    :param edge_times [batch_size]: timestamp of interaction
    :param edge_idxs [batch_size]: index of interaction
    :param n_neighbors [scalar]: number of temporal neighbor to consider in each convolutional
    layer
    :return: Probabilities for both the positive and negative edges
    """
        n_samples = len(source_nodes)
        source_node_embedding, destination_node_embedding, negative_node_embedding = self.compute_temporal_embeddings(
            source_nodes, destination_nodes, negative_nodes, edge_times,
            edge_idxs, n_neighbors)

        score = self.affinity_score(
            torch.cat([source_node_embedding, source_node_embedding], dim=0),
            torch.cat([destination_node_embedding,
                       negative_node_embedding])).squeeze(dim=0)
        pos_score = score[:n_samples]
        neg_score = score[n_samples:]

        return pos_score.sigmoid(), neg_score.sigmoid()

    def update_memory(self, nodes, messages):
        # Aggregate messages for the same nodes
        unique_nodes, unique_messages, unique_timestamps = \
          self.message_aggregator.aggregate(
            nodes,
            messages)

        if len(unique_nodes) > 0:
            unique_messages = self.message_function.compute_message(
                unique_messages)

        # Update the memory with the aggregated messages
        self.memory_updater.update_memory(unique_nodes,
                                          unique_messages,
                                          timestamps=unique_timestamps)

    def get_updated_memory(self, nodes, messages):
        # Aggregate messages for the same nodes
        unique_nodes, unique_messages, unique_timestamps = \
          self.message_aggregator.aggregate(
            nodes,
            messages)

        if len(unique_nodes) > 0:
            unique_messages = self.message_function.compute_message(
                unique_messages)

        updated_memory, updated_last_update = self.memory_updater.get_updated_memory(
            unique_nodes, unique_messages, timestamps=unique_timestamps)

        return updated_memory, updated_last_update

    def get_raw_messages(self, source_nodes, source_node_embedding,
                         destination_nodes, destination_node_embedding,
                         edge_times, edge_idxs):
        edge_times = torch.from_numpy(edge_times).float().to(self.device)
        edge_features = self.edge_raw_features[edge_idxs]

        source_memory = self.memory.get_memory(source_nodes) if not \
          self.use_source_embedding_in_message else source_node_embedding
        destination_memory = self.memory.get_memory(destination_nodes) if \
          not self.use_destination_embedding_in_message else destination_node_embedding

        source_time_delta = edge_times - self.memory.last_update[source_nodes]
        source_time_delta_encoding = self.time_encoder(
            source_time_delta.unsqueeze(dim=1)).view(len(source_nodes), -1)

        source_message = torch.cat([
            source_memory, destination_memory, edge_features,
            source_time_delta_encoding
        ],
                                   dim=1)
        messages = defaultdict(list)
        unique_sources = np.unique(source_nodes)

        for i in range(len(source_nodes)):
            messages[source_nodes[i]].append(
                (source_message[i], edge_times[i]))

        return unique_sources, messages

    def set_neighbor_finder(self, neighbor_finder):
        self.neighbor_finder = neighbor_finder
        self.embedding_module.neighbor_finder = neighbor_finder
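
Two patterns in this example are worth noting. First, `compute_edge_probabilities` scores positive and negative pairs in a single decoder pass by duplicating the source embeddings and concatenating [destinations; negatives], then splitting the result back with `score[:n_samples]` / `score[n_samples:]`. Second, `set_neighbor_finder` lets the same model switch between the training graph and the full graph at evaluation time. A toy illustration of the one-pass scoring layout (a stand-in dot-product decoder, not the model's `MergeLayer`):

import torch

n, d = 4, 16
src = torch.randn(n, d)
dst, neg = torch.randn(n, d), torch.randn(n, d)
score = (torch.cat([src, src], dim=0) * torch.cat([dst, neg], dim=0)).sum(dim=1)
pos_score, neg_score = score[:n], score[n:]   # rows 0..n-1 positive, n..2n-1 negative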
Example #9
class TGN(torch.nn.Module):
  """
  TGN model
  
  INIT INPUTS:
      neighbor_finder: NeighborFinder instance
      node_features: Nodes raw features of shape [n_nodes, node_feat_dim]
      edge_features: Edges raw features of shape [n_interactions, edge_feat_dim]
      n_layers: 'L' in the paper
      n_heads: Number of attention heads
      dropout: For nn.MultiheadAttention()
      use_memory: Bool variable, whether to augment the model with a node memory
      memory_update_at_start: Bool variable, whether to update memory at the start of the batch
      message_dimension: Node message dimension for m_i(t), default 100
      memory_dimension: Node memory dimension for s_i(t), default 172
      embedding_module_type: How to calculate embedding, default 'graph_attention'
      message_function: How to calculate node message, default 'mlp'
      mean_time_shift_src: Mean of time since last update for source nodes (used to normalize time_diffs)
      std_time_shift_src: Std of time since last update for source nodes
      mean_time_shift_dst: Mean of time since last update for destination nodes
      std_time_shift_dst: Std of time since last update for destination nodes
      n_neighbors: How many temporal neighbors to extract
      aggregator_type: How to aggregate messages, default 'last'
      memory_updater_type: How to update node memory
      use_destination_embedding_in_message: Whether to use z_j(t) instead of memory s_j(t-1) in messages
      use_source_embedding_in_message: Whether to use z_i(t) instead of memory s_i(t-1) in messages
  """
    
  def __init__(self, neighbor_finder, node_features, edge_features, device, n_layers=2,
               n_heads=2, dropout=0.1, use_memory=True, memory_update_at_start=True, 
               message_dimension=100, memory_dimension=172, embedding_module_type="graph_attention",
               message_function="mlp", mean_time_shift_src=0, std_time_shift_src=1, 
               mean_time_shift_dst=0, std_time_shift_dst=1, n_neighbors=None, aggregator_type="last",
               memory_updater_type="gru", use_destination_embedding_in_message=False,
               use_source_embedding_in_message=False):
      
    super(TGN, self).__init__()

    self.n_layers = n_layers
    self.neighbor_finder = neighbor_finder
    self.device = device
    self.logger = logging.getLogger(__name__)

    self.node_raw_features = torch.from_numpy(node_features.astype(np.float32)).to(device)  # node features to tensor
    self.edge_raw_features = torch.from_numpy(edge_features.astype(np.float32)).to(device)  # edge features to tensor

    self.n_node_features = self.node_raw_features.shape[1]  # node_feat_dim
    self.n_nodes = self.node_raw_features.shape[0]          # n_nodes
    self.n_edge_features = self.edge_raw_features.shape[1]  # edge_feat_dim
    self.embedding_dimension = self.n_node_features         # emb_dim = node_feat_dim
    self.n_neighbors = n_neighbors
    self.embedding_module_type = embedding_module_type
    self.use_destination_embedding_in_message = use_destination_embedding_in_message
    self.use_source_embedding_in_message = use_source_embedding_in_message
    
    self.use_memory = use_memory
    self.time_encoder = TimeEncode(dimension=self.n_node_features)  # encodes time to shape [node_feat_dim]
    self.memory = None

    self.mean_time_shift_src = mean_time_shift_src
    self.std_time_shift_src = std_time_shift_src
    self.mean_time_shift_dst = mean_time_shift_dst
    self.std_time_shift_dst = std_time_shift_dst

    if self.use_memory:
        
      self.memory_dimension = memory_dimension
      self.memory_update_at_start = memory_update_at_start
      # m_raw_i = (s_i || s_j || e || Phi(delta_t))
      raw_message_dimension = 2 * self.memory_dimension + self.n_edge_features + self.time_encoder.dimension  # raw message dim
      message_dimension = message_dimension if message_function != "identity" else raw_message_dimension      # message dim
      
      self.memory = Memory(n_nodes=self.n_nodes,
                           memory_dimension=self.memory_dimension,
                           input_dimension=message_dimension,
                           device=device)
      
      self.message_function = get_message_function(module_type=message_function,
                                                   raw_message_dimension=raw_message_dimension,
                                                   message_dimension=message_dimension)                 # message function
      
      self.message_aggregator = get_message_aggregator(aggregator_type=aggregator_type, device=device)  # message aggregator

      self.memory_updater = get_memory_updater(module_type=memory_updater_type, 
                                               memory=self.memory, message_dimension=message_dimension, 
                                               memory_dimension=self.memory_dimension, device=device)     # memory updator
      
      self.embedding_module_type = embedding_module_type
    
      self.embedding_module = get_embedding_module(module_type=embedding_module_type,
                                                   node_features=self.node_raw_features,
                                                   edge_features=self.edge_raw_features,
                                                   neighbor_finder=self.neighbor_finder,
                                                   time_encoder=self.time_encoder,
                                                   n_layers=self.n_layers,
                                                   n_node_features=self.n_node_features,
                                                   n_edge_features=self.n_edge_features,
                                                   n_time_features=self.n_node_features,
                                                   embedding_dimension=self.embedding_dimension,
                                                   device=self.device,
                                                   n_heads=n_heads, dropout=dropout,
                                                   use_memory=use_memory,
                                                   n_neighbors=self.n_neighbors)                         # embedding module
    
      # MLP to compute probability on an edge given two node embeddings
      self.affinity_score = MergeLayer(self.n_node_features, self.n_node_features, self.n_node_features, 1)
    
  def set_neighbor_finder(self, neighbor_finder):
    self.neighbor_finder = neighbor_finder
    self.embedding_module.neighbor_finder = neighbor_finder
    
  def get_updated_memory(self, nodes, messages):
    """
    Get (but do not persist) the updated node memory from messages (AGG-->MSG-->MEM, whereas in the paper the order is MSG-->AGG-->MEM)
    
    INPUTS:
        nodes: A list of node ids
        messages: A dictionary {node_id:[([message_1], timestamp_1), ([message_2], timestamp_2), ...]}; Messages from previous batches
        
    OUTPUTS:
        updated_memory: A tensor of shape [unique_nodes, memory_dimension]
        updated_last_update: A tensor of shape [unique_nodes]    
    """
    # Aggregate messages for the same nodes
    unique_nodes, unique_messages, unique_timestamps = self.message_aggregator.aggregate(nodes, messages)
    
    if len(unique_nodes) > 0:
      unique_messages = self.message_function.compute_message(unique_messages)
    
    updated_memory, updated_last_update = self.memory_updater.get_updated_memory(unique_nodes,
                                                                                 unique_messages,
                                                                                 timestamps=unique_timestamps)

    return updated_memory, updated_last_update


  def update_memory(self, nodes, messages):
    """
    Update nodes' memory using messages (AGG-->MSG-->MEM, whereas in the paper the order is MSG-->AGG-->MEM)
    
    INPUTS:
        nodes: A list of node ids
        messages: A dictionary {node_id:[([message_1], timestamp_1), ([message_2], timestamp_2), ...]}; Messages from previous batches
    """
    # Aggregate messages for the same nodes
    unique_nodes, unique_messages, unique_timestamps = self.message_aggregator.aggregate(nodes, messages)

    if len(unique_nodes) > 0:
      unique_messages = self.message_function.compute_message(unique_messages)

    # Update nodes' memory with the aggregated messages
    # Notice: update_memory() updates with no returns
    self.memory_updater.update_memory(unique_nodes, unique_messages,
                                      timestamps=unique_timestamps)
 
    
  def get_raw_messages(self, source_nodes, source_node_embedding, 
                       destination_nodes, destination_node_embedding, 
                       edge_times, edge_idxs):
      """
      Get source_nodes' raw messages m_raw_i(t) = {[S_i(t-1), S_j(t-1), e(t), Phi(t - t_last)], t}
       
      INPUTS:
         source_nodes: Array of shape [batch_size]; Nodes' raw message to be calculated
         destination_nodes: Array of shape [batch_size];
         edge_times: Array of shape [batch_size]; Timestamps of interactions (i.e. Current timestamps) for source_nodes
         edge_idxs: Array of shape [batch_size]; Index of interactions (at edge_times) for source_nodes
         source_node_embedding: z_i(t) with shape [batch_size, emb_dim=node_dim=mem_dim]
         destination_node_embedding: z_j(t) with shape [batch_size, emb_dim=node_dim=mem_dim]
             
      OUTPUTS:
         unique_sources: Array of shape [unique source nodes]
         messages: A dictionary {node_id:[([message_1], timestamp_1), ([message_2], timestamp_2), ...]}
                    where [message_x] is [S_i(t-1), S_j(t-1), e_ij(t), Phi(t - t_last)] and timestamp_x is the timestamp of message_x
      """  
      edge_times = torch.from_numpy(edge_times).float().to(self.device)
      edge_features = self.edge_raw_features[edge_idxs]  # e_ij(t), or e(t)
      
      # s_i(t-1) or z_i(t)
      source_memory = self.memory.get_memory(source_nodes) if not self.use_source_embedding_in_message else source_node_embedding
      # s_j(t-1) or z_j(t)
      destination_memory = self.memory.get_memory(destination_nodes) if not self.use_destination_embedding_in_message else destination_node_embedding
      
      source_time_delta = edge_times - self.memory.last_update[source_nodes]
      source_time_delta_encoding = self.time_encoder(source_time_delta.unsqueeze(dim=1)).view(len(source_nodes), -1)  # Phi(t-t^wave)
      
      unique_sources = np.unique(source_nodes)
      
      source_message = torch.cat([source_memory, destination_memory, edge_features, source_time_delta_encoding], dim=1)
      messages = defaultdict(list)
      for i in range(len(source_nodes)):
          messages[source_nodes[i]].append((source_message[i], edge_times[i]))
          
      return unique_sources, messages
      
  def compute_temporal_embeddings(self, source_nodes, destination_nodes, negative_nodes, edge_times, edge_idxs, n_neighbors=20):
    """
    Compute temporal embeddings for sources, destinations, and negatively sampled destinations.
    Corresponding to algorithm 1 and 2 in the paper.
    
    INPUTS:
        source_nodes: Array of shape [batch_size]; Source node ids.
        destination_nodes: Array of shape [batch_size]; Destination node ids
        negative_nodes: Array of shape [batch_size]; Ids of negative sampled destination
        edge_times: Array of shape [batch_size]; Timestamps of interactions (i.e. Current timestamps) for those nodes (i.e. src, dest, neg)
        edge_idxs: Array of shape [batch_size]; Index of interactions
        n_neighbors: Number of temporal neighbors to consider in each layer (hop)
        
    OUTPUTS: Temporal embeddings for sources, destinations and negatives
        source_node_embedding: A tensor of shape [source_nodes, emb_dim]
        destination_node_embedding: A tensor of shape [destination_nodes, emb_dim]
        negative_node_embedding: A tensor of shape [negative_nodes, emb_dim] 
    """

    n_samples = len(source_nodes)
    nodes = np.concatenate([source_nodes, destination_nodes, negative_nodes])  # all nodes
    positives = np.concatenate([source_nodes, destination_nodes])              # positive pairs
    timestamps = np.concatenate([edge_times, edge_times, edge_times])          # (current) timestamps for those nodes (i.e. V_2(t_1) and V_2(t_2))

    memory = None
    time_diffs = None
    
    if self.use_memory:

        ### Line 5-7 in Algorithm 2: Update memory first with previous batch messages, and then calculate embeddings
        if self.memory_update_at_start:
          # update memory for ALL nodes with messages stored in previous batches
          memory, last_update = self.get_updated_memory(list(range(self.n_nodes)), self.memory.messages)
        ### Algorithm 1: use the memory as left by the previous batch and compute embeddings
        else:
          memory = self.memory.get_memory(list(range(self.n_nodes)))
          last_update = self.memory.last_update
        
        # Compute differences between the time the memory of a node was last updated,
        # and the time for which we want to compute the embedding of a node
        source_time_diffs = torch.LongTensor(edge_times).to(self.device) - last_update[source_nodes].long()
        source_time_diffs = (source_time_diffs - self.mean_time_shift_src) / self.std_time_shift_src
        destination_time_diffs = torch.LongTensor(edge_times).to(self.device) - last_update[destination_nodes].long()
        destination_time_diffs = (destination_time_diffs - self.mean_time_shift_dst) / self.std_time_shift_dst
        negative_time_diffs = torch.LongTensor(edge_times).to(self.device) - last_update[negative_nodes].long()
        negative_time_diffs = (negative_time_diffs - self.mean_time_shift_dst) / self.std_time_shift_dst
        
        # time_diffs (i.e. delta_t) is only used by the TimeEmbedding variant of the embedding module
        time_diffs = torch.cat([source_time_diffs, destination_time_diffs, negative_time_diffs], dim=0)

    # Compute the embeddings for [source_nodes, destination_nodes, negative_nodes]
    # If memory_update_at_start is True: Line 8 in Algorithm 2; the procedure matches Figure 2 (right) in the paper
    # If memory_update_at_start is False: Line 4 in Algorithm 1; the procedure matches Figure 2 (left) in the paper
    node_embedding = self.embedding_module.compute_embedding(memory=memory,
                                                             source_nodes=nodes,
                                                             timestamps=timestamps,
                                                             n_layers=self.n_layers,
                                                             n_neighbors=n_neighbors,
                                                             time_diffs=time_diffs)

    source_node_embedding = node_embedding[:n_samples]
    destination_node_embedding = node_embedding[n_samples: 2 * n_samples]
    negative_node_embedding = node_embedding[2 * n_samples:]

    if self.use_memory:
        
        ### Line 12 in algorithm 2: If memory_update_at_start, we persist the update to memory (i.e. S(t-1)) here
        if self.memory_update_at_start:
          # Persist the updates to the memory only for sources and destinations
          self.update_memory(positives, self.memory.messages)
          # Remove messages for the positives, since the memory has already been updated with their old messages
          self.memory.clear_messages(positives)
        
        ### Line 7 in algorithm 1
        ### Line 11 in algorithm 2
        # get raw message on source nodes
        unique_sources, source_id_to_messages = self.get_raw_messages(source_nodes,
                                                                      source_node_embedding,
                                                                      destination_nodes,
                                                                      destination_node_embedding,
                                                                      edge_times, edge_idxs)
        # get raw message on destination nodes
        unique_destinations, destination_id_to_messages = self.get_raw_messages(destination_nodes,
                                                                                destination_node_embedding,
                                                                                source_nodes,
                                                                                source_node_embedding,
                                                                                edge_times, edge_idxs)
        
        ### Line 11 in Algorithm 2: If memory_update_at_start, we then store the new raw message
        if self.memory_update_at_start:
           self.memory.store_raw_messages(unique_sources, source_id_to_messages)
           self.memory.store_raw_messages(unique_destinations, destination_id_to_messages)
        ### Line 7-9 in Algorithm 1: If not memory_update_at_start, we update memory here with new raw message 
        else:
          self.update_memory(unique_sources, source_id_to_messages)
          self.update_memory(unique_destinations, destination_id_to_messages)

    return source_node_embedding, destination_node_embedding, negative_node_embedding
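
# A hedged sketch of how this method is typically driven: batches must be consumed
# in chronological order, because each call both reads and schedules updates to the
# memory. The tgn object, the batch iterable, and the negative sampler below are
# assumed to exist and are not part of the code above.

# Chronological mini-batches; shuffling would corrupt the memory state.
for src, dst, ts, idxs in chronological_batches:   # assumed iterable of numpy arrays
    neg = negative_sampler.sample(len(src))        # assumed negative sampler
    src_emb, dst_emb, neg_emb = tgn.compute_temporal_embeddings(
        src, dst, neg, ts, idxs, n_neighbors=20)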


  def compute_edge_probabilities(self, source_nodes, destination_nodes, negative_nodes, edge_times, edge_idxs, n_neighbors=20):
    """
    Line 5 in algorithm 1; Line 9 in algorithm 2
    
    Compute probabilities for edges between sources and destination and between sources and
    negatives by first computing temporal embeddings using the TGN encoder and then feeding them
    into the MLP decoder.
    
    INPUTS:
        source_nodes: Array of shape [batch_size]; Source node ids.
        destination_nodes: Array of shape [batch_size]; Destination node ids.
        negative_nodes: Array of shape [batch_size]; Negative node ids.
        edge_times: Array of shape [batch_size]; Timestamps of interactions (i.e. Current timestamps) for those nodes (i.e. src, dest, neg)
        edge_idxs: Array of shape [batch_size]; Index of interactions
        n_neighbors: Number of temporal neighbors to consider at each layer (i.e. each hop)
    
    OUTPUTS:
    Probabilities for both the positive and negative edges
    """
    n_samples = len(source_nodes)
    
    # get node embeddings for all nodes first
    source_node_embedding, destination_node_embedding, negative_node_embedding = self.compute_temporal_embeddings(
      source_nodes, destination_nodes, negative_nodes, edge_times, edge_idxs, n_neighbors)
    
    # then calculate the P_pos and P_neg
    score = self.affinity_score(torch.cat([source_node_embedding, source_node_embedding], dim=0),
                                torch.cat([destination_node_embedding, negative_node_embedding])).squeeze(dim=0)
    
    pos_score = score[:n_samples]
    neg_score = score[n_samples:]

    return pos_score.sigmoid(), neg_score.sigmoid()
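
# The two probability tensors feed directly into a binary cross-entropy objective.
# A minimal training-step sketch, assuming a tgn model, the batch arrays, and an
# optimizer already exist (all names outside torch are assumptions):

import torch

criterion = torch.nn.BCELoss()

optimizer.zero_grad()
pos_prob, neg_prob = tgn.compute_edge_probabilities(
    sources_batch, destinations_batch, negatives_batch,
    timestamps_batch, edge_idxs_batch, n_neighbors=20)

loss = criterion(pos_prob, torch.ones_like(pos_prob)) \
     + criterion(neg_prob, torch.zeros_like(neg_prob))
loss.backward()
optimizer.step()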
Ejemplo n.º 10
0
    def _init_memory(self):
        """
        Initialize memory class when it's needed by pymem.
        """

        self.memory = Memory()
Ejemplo n.º 11
0
class Pymem(object):
    """Provides class instance methods for general process and memory
manipulations.
    """

    def __init__(self):
        """
        Initialize pymem objects
        """

        self.process = None
        self.memory = None
        self.module = None
        self.pid = None  # refractor
        self.process32 = None  # refractor
        self.process_handle = None

    def _init_process(self):
        """
        Initialize process class when it's needed by pymem.
        """

        self.process = Process()

    def _init_memory(self):
        """
        Initialize memory class when it's needed by pymem.
        """

        self.memory = Memory()

    def _init_module(self):
        """
        Initialize module class when it's needed by pymem.
        """

        self.module = Module()

    @is_init('process')
    def open_process(self, process_id, debug=True):
        """
        Opens a process for interaction.
        """

        if debug:
            if self.process.open_debug(process_id):
                self.process_handle = self.process.h_process
                self.pid = process_id
                self.process32 = self.process.process32
                return True
            return False
        return self.process.open(process_id)

    @is_init('process')
    def open_process_from_name(self, process_name, debug=True, number=0):
        """
        Opens a process from its name for interaction.
        """

        processes = process_from_name(process_name)
        if processes is not None:
            if 0 <= number < len(processes):
                process = processes[number]
                return self.open_process(process.th32ProcessID, debug)
        return False

    @is_init('memory')
    def read_offset(self, address, selected_type):
        """
        Read memory from a process.
        If the type <T> is supported, this method will provide the required
        call in order to read from the process. If either the type <T> is not
        supported or the process is not open, the method will raise an Exception.

        Supported types : float, int, uint, long, ulong, int64, uint64, byte
        """

        if self.process.is_process_open:
            if not self.memory.is_process_set:
                self.memory.set_process(self.process.h_process)
            return self.memory.read_offset(address, selected_type)
        return False

    @is_init('module')
    def list_module32(self):
        """
        Return the modules (MODULEENTRY32) loaded by the current process.
        """

        if self.process32 is not None:
            if not self.module.is_process_set:
                self.module.set_process32(self.process32)
            return self.module.list_module32()
        return []

    @is_init('module')
    def has_module32(self, module):
        """
        Return True if the current process has loaded the given module (dll).
        """

        if self.process32 is not None:
            if not self.module.is_process_set:
                self.module.set_process32(self.process32)
            return self.module.has_module32(module)
        return False
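
# A short usage sketch for the class above. The process name, address, and dll are
# placeholders, and the 'int' type token is an assumption based on the supported
# types listed in the read_offset docstring.

pm = Pymem()

# Open the target by name (uses the debug path by default)
if pm.open_process_from_name('notepad.exe'):
    # Read a signed integer at a placeholder address
    value = pm.read_offset(0x0057F0F0, 'int')
    print(value)
    # Check whether a given dll is loaded
    print(pm.has_module32('kernel32.dll'))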
Ejemplo n.º 12
0
class ATC(core.Entity):
    ''' Example new entity object for BlueSky. '''
    def __init__(self):
        super().__init__()
        self.super_start = time.perf_counter()

        self.initilized = False

        self.epoch_counter = 0
        # [Success, Fail]
        self.results = np.zeros(2)

        self.all_success = []
        self.all_fail = []
        self.mean_success = 0
        self.all_mean_success, self.best = 0, 0
        self.mean_rewards = []
        self.epoch_actions = np.zeros(ACTION_SHAPE)

        self.start = None
        self.stop = None

        self.dist = [0, -1]
        self.spd = [0, -1]
        self.trk = [0, 360]
        self.vs = [0, -1]

        self.last_observation = {}
        self.last_reward_observation = {}
        self.previous_action = {}
        self.observation = {}

    def on_load(self):
        self.sector_manager = Sector_Manager(SECTORS)
        self.route_manager = Route_Manager(ROUTES,
                                           test_routes=VISUALIZE,
                                           draw_paths=VISUALIZE)
        self.traffic_manager = Traffic_Manager(
            max_ac=MAX_AC,
            times=TIME_SEP,
            max_spd=CONSTRAINTS["cas"]["max"],
            min_spd=CONSTRAINTS["cas"]["min"],
            max_alt=32000,
            min_alt=32000,
            network=self.route_manager)

        self.memory = Memory()

        self.agent = Agent(state_size=STATE_SHAPE,
                           action_size=ACTION_SHAPE,
                           value_size=VALUE_SHAPE)

        # Prefer the best checkpoint, fall back to the latest, else start fresh
        try:
            self.agent.load(path=FILE + "best.h5")
        except Exception:
            try:
                self.agent.load(path=FILE + ".h5")
            except Exception:
                pass

        self.initilized = True

        print("ATC: READY")
        string = "=================================\n   UPDATE: RUNNING EPOCH {}\n=================================\n".format(
            self.format_epoch())
        self.print_all(string)
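
# The try/except chain above silently swallows load errors; an explicit existence
# check narrows what can fail. A sketch under the assumption that checkpoints live
# on the local filesystem and that Agent.load takes a path keyword; the function
# name is illustrative.

import os

def load_best_available(agent, file_prefix):
    # Prefer the tuned 'best' checkpoint, fall back to the latest,
    # otherwise start from freshly initialised weights.
    for candidate in (file_prefix + "best.h5", file_prefix + ".h5"):
        if os.path.exists(candidate):
            agent.load(path=candidate)
            return candidate
    return None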

    # Functions that need to be called periodically can be indicated to BlueSky
    # with the timed_function decorator

    @core.timed_function(name='example', dt=12)
    def update(self):
        # Initialize the system on first update
        if not self.initilized:
            self.on_load()

        # Start epoch timer
        if not self.start:
            self.start = time.perf_counter()

        # Create aircraft
        self.traffic_manager.spawn()
        # Update Aircraft active sectors
        self.traffic_manager.update_active(self.sector_manager.system_sectors)

        # Generate the full distance matrix between all aircraft
        full_dist_matrix = self.get_dist_matrix()

        # Get each aircraft's nearest neighbour from the distance matrix
        nearest_ac = self.get_nearest_ac(dist_matrix=full_dist_matrix)

        # Get goal distances for each aircraft
        g_distance = self.get_goal_distances()

        # Get an array of terminal aircraft
        terminal_ac, terminal_id = self.get_terminal(nearest_ac, g_distance)

        self.handle_terminal(terminal_id)

        if self.traffic_manager.check_done():
            self.epoch_reset()
            return

        if not TRAIN and (self.traffic_manager.total % 50 == 0):
            string = "Success: {} | Fail: {} | Mean Success: {:.3f}%".format(
                int(self.results[0]), int(self.results[1]),
                (self.results[0] / MAX_AC) * 100)
            self.print_all(string)

        if len(traf.id) <= 0:
            return

        policy, normal_state, normal_context = self.get_actions(
            terminal_ac, g_distance, full_dist_matrix)

        if len(policy) > 0:
            idx = 0
            new_actions = {}
            for i in range(len(traf.id)):
                if terminal_ac[i] == 0 and len(
                        self.traffic_manager.active_sectors[i]) > 0:
                    if not np.any(np.isnan(policy[idx])):
                        _id = traf.id[i]

                        if _id not in self.last_observation:
                            self.last_observation[_id] = [
                                normal_state[idx], normal_context[idx]
                            ]

                        # Sample an action from the policy distribution
                        action = np.random.choice(
                            ACTION_SHAPE, 1, p=policy[idx].flatten())[0]

                        self.epoch_actions[action] += 1

                        # Close out the previous transition for this aircraft
                        if _id not in self.observation and _id in self.previous_action:
                            self.observation[_id] = [
                                normal_state[idx], normal_context[idx]
                            ]

                            self.memory.store(_id,
                                              self.last_observation[_id],
                                              self.previous_action[_id],
                                              nearest_ac[i])

                            self.last_observation[_id] = self.observation[_id]

                            del self.observation[_id]

                        self.perform_action(i, action)

                        new_actions[_id] = action

                    idx += 1

            # Record this step's actions only after the loop, so the loop body
            # still reads the previous step's actions
            self.previous_action = new_actions

    # Act
    def get_actions(self, terminal_ac, g_dists, dist_matrix):
        ids = []
        new_actions = {}

        state = self.get_state()

        normal_state, normal_context = self.normalise_all(
            state, terminal_ac, g_dists, dist_matrix)

        policy = []
        if not len(normal_state) == 0:
            policy = self.agent.act(normal_state, normal_context)

        return policy, normal_state, normal_context

    # For an aircraft perform an action
    def perform_action(self, i, action):
        if action < 3:
            traf_alt = int(traf.alt[i] / ft)
            new_alt = int(round((traf_alt + ACTIONS[action])))

            alt = max(CONSTRAINTS["alt"]["min"],
                      min(CONSTRAINTS["alt"]["max"], new_alt))

            # print(traf_alt, alt)

            stack.stack("{} alt {}".format(traf.id[i], alt))
        elif action == 4:
            # Maintain current altitude: the value is computed but no
            # stack command is issued, so this branch is effectively a no-op
            traf_alt = traf.alt[i] / ft
            new_alt = int(round(traf_alt))

    # Get the current state

    def get_state(self):
        state = np.zeros((len(traf.id), 6))

        state[:, 0] = traf.lat
        state[:, 1] = traf.lon
        state[:, 2] = traf.trk
        state[:, 3] = traf.alt
        state[:, 4] = traf.tas
        state[:, 5] = traf.vs

        return state

    # Get all nodes for each aircraft
    def get_all_nodes(self):
        start_ids = np.zeros(len(traf.id), dtype=int)
        end_ids = np.zeros(len(traf.id), dtype=int)

        for i in range(len(traf.id)):
            _id = traf.id[i]
            route = self.traffic_manager.routes[_id]
            start_ids[i] = np.argwhere(
                self.route_manager.idx_array == route[0])
            end_ids[i] = np.argwhere(self.route_manager.idx_array == route[-1])

        return start_ids, end_ids

    # Normalise the state and context
    def normalise_all(self, state, terminal_ac, g_dists, dist_matrix):
        normal_states = self.normalise_state(state, terminal_ac, g_dists)

        normal_context = []

        start_ids, end_ids = self.get_all_nodes()

        max_agents = 0
        for _id in traf.id:
            if terminal_ac[traf.id2idx(_id)] > 0 or len(
                    self.traffic_manager.active_sectors[traf.id2idx(
                        _id)]) <= 0:
                continue

            new_context = self.normalise_context(_id, terminal_ac, dist_matrix,
                                                 start_ids, end_ids)

            max_agents = max(max_agents, len(new_context))

            if len(normal_context) == 0:
                normal_context = new_context
            else:
                normal_context = np.append(
                    keras.preprocessing.sequence.pad_sequences(
                        normal_context, max_agents, dtype='float32'),
                    keras.preprocessing.sequence.pad_sequences(
                        new_context, max_agents, dtype='float32'),
                    axis=0)

        if len(normal_context) == 0:
            normal_context = np.zeros((1, 1, 9), dtype='float32')

        # print(normal_states.shape, normal_context.shape)
        return normal_states, normal_context
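
# The pad_sequences calls above align context blocks that contain different numbers
# of surrounding aircraft. A small illustration of the padding behaviour, assuming
# the TensorFlow Keras implementation of pad_sequences:

import numpy as np
from tensorflow import keras

context = np.ones((1, 2, 9), dtype='float32')  # 1 agent, 2 context aircraft, 9 features
padded = keras.preprocessing.sequence.pad_sequences(context, 4, dtype='float32')
print(padded.shape)  # (1, 4, 9): zero rows are prepended ('pre' is the default padding)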

    # Normalise the agent state only
    def normalise_state(self, state, terminal_ac, g_dists):
        total_active = 0

        for i in range(len(terminal_ac)):
            if terminal_ac[i] == 0 and len(
                    self.traffic_manager.active_sectors[i]) > 0:
                total_active += 1

        normalised_state = np.zeros((total_active, STATE_SHAPE))

        count = 0
        for i in range(len(traf.id)):
            if terminal_ac[i] > 0 or len(
                    self.traffic_manager.active_sectors[i]) <= 0:
                continue

            normalised_state[count, :] = self.normalise(state[i],
                                                        'state',
                                                        traf.id[i],
                                                        g_dist=g_dists[i])

            count += 1

        return normalised_state

    # Get and normalise context
    def normalise_context(self, _id, terminal_ac, dist_matrix, start_ids,
                          end_ids):
        context = []
        idx = traf.id2idx(_id)

        distances = dist_matrix[:, idx]
        this_sectors = self.traffic_manager.active_sectors[idx]

        this_lat, this_lon = traf.lat[idx], traf.lon[idx]

        for i in range(len(distances)):
            # Ignore current aircraft
            if i == idx:
                continue

            if terminal_ac[i] > 0 or len(
                    self.traffic_manager.active_sectors[i]) <= 0:
                continue

            sectors = self.traffic_manager.active_sectors[i]

            # Only consider aircraft that share a sector with the agent
            if not any(x in this_sectors for x in sectors):
                continue

            dist = get_dist([this_lat, this_lon], [traf.lat[i], traf.lon[i]])

            # Only care about visible distance aircraft
            if dist > 40:
                continue

            spd = traf.tas[i]
            alt = traf.alt[i]
            trk = traf.trk[i]
            vs = traf.vs[i]
            start_id = start_ids[i]
            end_id = end_ids[i]

            self.dist[1] = max(self.dist[1], dist)
            self.spd[1] = max(self.spd[1], spd)
            self.vs[1] = max(self.vs[1], vs)

            dist = dist / self.dist[1]
            spd = spd / self.spd[1]
            trk = trk / self.trk[1]
            alt = ((alt/ft)-CONSTRAINTS["alt"]["min"]) / \
                (CONSTRAINTS["alt"]["max"]-CONSTRAINTS["alt"]["min"])

            # Normalise vertical speed by the running max (vs was read above)
            if vs != 0:
                vs = vs / self.vs[1]

            n_nodes, dist2next = get_n_nodes(traf.id[i], self.traffic_manager,
                                             self.route_manager)

            self.dist[1] = max(self.dist[1], dist2next)
            dist2next = dist2next / self.dist[1]

            if len(context) == 0:
                context = np.array([
                    spd, alt, trk, vs, dist, dist2next, n_nodes[0], n_nodes[1],
                    n_nodes[2]
                ]).reshape(1, 1, 9)
            else:
                context = np.append(context,
                                    np.array([
                                        spd, alt, trk, vs, dist, dist2next,
                                        n_nodes[0], n_nodes[1], n_nodes[2]
                                    ]).reshape(1, 1, 9),
                                    axis=1)

        if len(context) == 0:
            context = np.zeros(9).reshape(1, 1, 9)

        return context

    # perform normalisation
    def normalise(self, state, what, _id, g_dist=None):

        # Normalise the entire state
        if what == 'state':
            if g_dist is None:
                raise Exception(
                    "For normalising a state please pass the distance to the goal."
                )

            self.dist[1] = max(self.dist[1], g_dist)
            self.spd[1] = max(self.spd[1], state[4])
            self.vs[1] = max(self.vs[1], state[5])

            dist = g_dist / self.dist[1]
            spd = state[4] / self.spd[1]
            trk = state[2] / self.trk[1]
            alt = ((state[3]/ft)-CONSTRAINTS["alt"]["min"]) / \
                (CONSTRAINTS["alt"]["max"]-CONSTRAINTS["alt"]["min"])

            vs = 0
            if state[5] != 0:
                vs = state[5] / self.vs[1]

            n_nodes, dist2next = get_n_nodes(_id, self.traffic_manager,
                                             self.route_manager)

            self.dist[1] = max(self.dist[1], dist2next)
            dist2next = dist2next / self.dist[1]

            return np.array([
                spd, alt, trk, vs, dist, dist2next, n_nodes[0], n_nodes[1],
                n_nodes[2]
            ])
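
# Note that normalisation here divides by running maxima (self.dist[1], self.spd[1],
# self.vs[1]) that grow as larger observations arrive, so the same raw value can map
# to different normalised values over time. A compact, self-contained illustration
# of the pattern:

running_max = 1.0

def running_norm(value):
    global running_max
    running_max = max(running_max, value)
    return value / running_max

print(running_norm(50.0))   # 1.0  (50 is the largest value seen so far)
print(running_norm(25.0))   # 0.5
print(running_norm(100.0))  # 1.0  (the max grows; earlier scales shift)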

    # Get the terminal aircraft
    def get_terminal(self, nearest_ac, g_dists):
        terminal_ac = np.zeros(len(traf.id), dtype=int)
        terminal_id = []

        # Loop through all aircraft
        for i in range(len(traf.id)):
            # Terminal state 0 = not terminal, 1 = collision, 2 = success
            T = 0

            # Only care about aircraft in a sector
            if len(self.traffic_manager.active_sectors[i]) > 0:
                close_ac = nearest_ac[i]
                n_ac_data = (close_ac[0], close_ac[1])

                # Get the terminal state
                T = self.agent.terminal(i, n_ac_data, g_dists[i])

                # Only care about terminal aircraft
                if T != 0:
                    # Update collision aircraft
                    if T == 1:
                        terminal_ac[i] = 1
                        terminal_ac[traf.id2idx(close_ac[2])] = 1
                    elif terminal_ac[i] != 1:
                        terminal_ac[i] = 2

                    _id = traf.id[i]
                    self.memory.store(_id, self.last_observation[_id],
                                      self.previous_action[_id], nearest_ac[i],
                                      T)

        for i in range(len(terminal_ac)):
            if terminal_ac[i] > 0:
                terminal_id.append([traf.id[i], terminal_ac[i]])

        return terminal_ac, terminal_id

    # Handle terminal aircraft
    def handle_terminal(self, terminal_id):
        for ac in terminal_id:
            stack.stack('DEL {}'.format(ac[0]))

            self.traffic_manager.active -= 1

            if ac[1] == 1:
                self.results[1] += 1
            elif ac[1] == 2:
                self.results[0] += 1

    # Generates a distance matrix between all aircraft in the system
    def get_dist_matrix(self):
        size = traf.lat.shape[0]
        return geo.latlondist_matrix(np.repeat(traf.lat, size),
                                     np.repeat(traf.lon, size),
                                     np.tile(traf.lat, size),
                                     np.tile(traf.lon,
                                             size)).reshape(size, size)
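
# The repeat/tile pairing enumerates every ordered (i, j) pair before the reshape,
# so element [i, j] of the result is the distance from aircraft i to aircraft j.
# A small numpy illustration of the indexing scheme:

import numpy as np

size = 3
# np.repeat fixes the row aircraft, np.tile cycles the column aircraft.
rows = np.repeat(np.arange(size), size).reshape(size, size)
cols = np.tile(np.arange(size), size).reshape(size, size)
print(rows)  # [[0 0 0] [1 1 1] [2 2 2]]
print(cols)  # [[0 1 2] [0 1 2] [0 1 2]]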

    # Get the nearest aircraft to agents
    def get_nearest_ac(self, dist_matrix):
        nearest = []

        # Loop through all aircraft
        for i in range(len(traf.id)):
            a_alt = traf.alt[i] / ft

            ac_dists = dist_matrix[:, i]

            close = np.inf
            alt_sep = np.inf

            nearest_id = None

            # Loop through the row on the dist matrix
            for x in range(len(ac_dists)):
                # Ensure it is not the current aircraft and is in controlled airspace
                if x != i and len(
                        self.traffic_manager.active_sectors[x]) > 0:

                    # See if it is closest and update
                    if ac_dists[x] < close:
                        close = float(ac_dists[x])
                        i_alt = traf.alt[x] / ft

                        alt_sep = abs(a_alt - i_alt)

                        nearest_id = traf.id[x]
            nearest.append([close, alt_sep, nearest_id])

        return np.array(nearest)
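
# The double loop above can be replaced by a masked argmin when eligibility is
# expressed as a boolean array. A sketch assuming a square dist_matrix and a
# precomputed in_sector mask; both names are illustrative.

import numpy as np

def nearest_indices(dist_matrix, in_sector):
    d = dist_matrix.astype(float)
    np.fill_diagonal(d, np.inf)   # never pair an aircraft with itself
    d[:, ~in_sector] = np.inf     # exclude aircraft outside controlled sectors
    return np.argmin(d, axis=1)   # index of the closest eligible aircraft per row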

    # Returns an array of each aircraft's distance to its goal
    def get_goal_distances(self):
        goal_ds = np.zeros(len(traf.id), dtype=float)

        for i in range(len(traf.id)):
            goal_ds[i] = get_goal_dist(traf.id[i], self.traffic_manager,
                                       self.route_manager)

        return goal_ds

    # Reset the environment for the next epoch
    def epoch_reset(self):
        # Reset the traffic creation
        self.traffic_manager.reset()

        # Keep track of all success and failures
        self.all_success.append(self.results[0])
        self.all_fail.append(self.results[1])

        # Calculate total mean success
        self.all_mean_success = np.mean(self.all_success)

        # Calculate rolling mean success
        if (self.epoch_counter + 1) >= 50:
            self.mean_success = np.mean(self.all_success[-50:])

        if (self.epoch_counter + 1) % 5 == 0:
            if self.mean_success > self.best:
                if TRAIN:
                    print('::::::: Saving Best ::::::')
                    self.agent.save(path=NEW_FILE + "best.h5")
                self.best = self.mean_success
            if TRAIN:
                print(':::::: Saving Model ::::::')
                self.agent.save(path=NEW_FILE + ".h5")
                print(":::::::: Training ::::::::")
                self.agent.train(self.memory)
                print(":::::::: Complete ::::::::")

        temp = np.array([np.array(self.all_success), np.array(self.all_fail)])
        np.savetxt("Files/" + NEW_FILE + "_numpy.csv", temp, delimiter=',')

        # Stop the timer
        self.stop = time.perf_counter()
        # -------- Printing Outputs --------
        string = "Epoch run in {:.2f} seconds".format(self.stop - self.start)
        self.print_all(string)
        string = "Success: {} | Fail: {} | Mean Success: {:.3f}% | (50) Mean Success Rolling {:.3f}% | Best {:.3f}%".format(
            int(self.results[0]), int(self.results[1]),
            (self.all_mean_success / MAX_AC) * 100,
            (self.mean_success / MAX_AC) * 100, (self.best / MAX_AC) * 100)
        self.print_all(string)
        string = "Actions -> Descend: {}, Hold Current: {}, Climb: {}, Maintain Climb: {}".format(
            self.epoch_actions[0], self.epoch_actions[1],
            self.epoch_actions[2], self.epoch_actions[3])
        # string = "Actions -> Descend: {}, Climb: {}".format(
        #     self.epoch_actions[1], self.epoch_actions[0])
        self.print_all(string)

        if self.epoch_counter + 1 >= EPOCHS:
            super_stop = time.perf_counter()
            stack.stack("STOP")
            string = "::END:: Training {} episodes took {:.2f} hours".format(
                EPOCHS, ((super_stop - self.super_start) / 60) / 60)
            self.print_all(string)
            return

        self.epoch_counter += 1
        string = "=================================\n   UPDATE: RUNNING EPOCH {}\n=================================\n".format(
            self.format_epoch())
        self.print_all(string)

        # Reset values
        self.results = np.zeros(2)
        self.stop = None
        self.start = None
        self.mean_rewards = []
        self.epoch_actions = np.zeros(ACTION_SHAPE)

        self.previous_action = {}
        self.last_observation = {}
        self.observation = {}

    # Scripts for printing values
    def print_all(self, string):
        stack.stack(f'ECHO {string}')
        print(string)

    def format_epoch(self):
        epoch_string = ""

        if self.epoch_counter + 1 < 10:
            epoch_string += "0"
        if self.epoch_counter + 1 < 100:
            epoch_string += "0"
        if self.epoch_counter + 1 < 1000:
            epoch_string += "0"
        if self.epoch_counter + 1 < 10000:
            epoch_string += "0"

        epoch_string += str(self.epoch_counter + 1)
        return epoch_string
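
# The comparison cascade above left-pads the epoch number to five digits;
# str.zfill or a format specifier gives the same result in one line:

n = 42
print(str(n).zfill(5))  # '00042'
print(f"{n:05d}")       # '00042'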