dgnn = dgnn.to(device)
num_instance = len(train_data.sources)
num_batch = math.ceil(num_instance / BATCH_SIZE)
logger.info('num of training instances: {}'.format(num_instance))
logger.info('num of batches per epoch: {}'.format(num_batch))
idx_list = np.arange(num_instance)

new_nodes_val_aps = []
val_aps = []
epoch_times = []
total_epoch_times = []
train_losses = []

early_stopper = EarlyStopMonitor(max_round=args.patience)
for epoch in range(NUM_EPOCH):
    start_epoch = time.time()

    ### Training

    # Reinitialize memory of the model at the start of each epoch
    dgnn.memory_s.__init_memory__()
    dgnn.memory_g.__init_memory__()

    # Train using only the training graph
    dgnn.set_neighbor_finder(train_ngh_finder)
    m_loss = []

    logger.info('start {} epoch'.format(epoch))
    for k in range(0, num_batch, args.backprop_every):
        loss = 0