            model.zero_grad()
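            # forward pass: score terms for the positive and negative triples,
            # plus the per-sample embeddings reused below for regularization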
            pos, neg, pos_h_e, pos_t_e, neg_h_e, neg_t_e = model(
                pos_h_batch, pos_t_batch, pos_r_batch, neg_h_batch,
                neg_t_batch, neg_r_batch)

            if args.loss_type == 0:
                losses = loss_function(pos, neg, margin)
            else:
                losses = loss_function(pos, neg)
            ent_embeddings = model.ent_embeddings(
                torch.cat([pos_h_batch, pos_t_batch, neg_h_batch,
                           neg_t_batch]))
            rel_embeddings = model.rel_embeddings(
                torch.cat([pos_r_batch, neg_r_batch]))
            # norm-regularize both the freshly looked-up batch embeddings and
            # the per-sample embeddings returned by the forward pass
            losses = (losses + loss.normLoss(ent_embeddings) +
                      loss.normLoss(rel_embeddings) + loss.normLoss(pos_h_e) +
                      loss.normLoss(pos_t_e) + loss.normLoss(neg_h_e) +
                      loss.normLoss(neg_t_e))

            losses.backward()
            optimizer.step()
            total_loss += losses.data

        agent.append(trainCurve, epoch, total_loss[0])

        if epoch % 10 == 0:
            # report elapsed seconds and the accumulated loss for this epoch
            now_time = time.time()
            print(now_time - start_time)
            print("Train total loss: %d %f" % (epoch, total_loss[0]))

        if epoch % 10 == 0:
            pass  # periodic evaluation block cut off in the source excerpt
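
# The loops in this file lean on loss.normLoss as a soft norm regularizer. Its
# definition sits outside this excerpt; the helper below is a minimal sketch of
# one common choice (hinge on the squared L2 row norm), not necessarily the
# exact implementation in the accompanying loss module.
def normLoss_sketch(embeddings, dim=1):
    # squared L2 norm of every embedding row
    norm = torch.sum(embeddings ** 2, dim=dim, keepdim=True)
    # only rows whose squared norm exceeds 1 contribute to the penalty
    return torch.sum(torch.clamp(norm - 1.0, min=0.0))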

# Example 2: time-aware variant (temporal batches, get_rseq relation encoding)

                model.zero_grad()
                # forward pass over the time-aware batches (the head of this
                # call was cut off in the source; reconstructed from its usage)
                pos, neg = model(pos_h_batch, pos_t_batch, pos_r_batch,
                                 pos_time_batch, neg_h_batch, neg_t_batch,
                                 neg_r_batch, neg_time_batch)

                if args.loss_type == 0:
                    losses = loss_function(pos, neg, margin)
                else:
                    losses = F.cross_entropy(pos, neg)
                ent_embeddings = model.ent_embeddings(
                    torch.cat(
                        [pos_h_batch, pos_t_batch, neg_h_batch, neg_t_batch]))

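                # get_rseq combines the relation ids with their time-token
                # batches into a single time-aware relation representation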
                rseq_embeddings = model.get_rseq(
                    torch.cat([pos_r_batch, neg_r_batch]),
                    torch.cat([pos_time_batch, neg_time_batch]))
                losses = (losses + loss.normLoss(ent_embeddings) +
                          loss.normLoss(rseq_embeddings))
                losses.backward()
                optimizer.step()
                total_loss += losses.data

            # average per-batch loss for this epoch, computed once after the
            # batch loop (no int() truncation)
            average_loss = float(total_loss[0]) / len(trainBatchList)
            for_loss.append(average_loss)

            if epoch % 5 == 0:
                now_time = time.time()
                print(now_time - start_time)
                print("Train total loss: %d %f" % (epoch, total_loss[0]))

            if config.early_stopping_round > 0:
                if epoch == 0:
                    pass  # early-stopping bookkeeping cut off in the source excerpt
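
# loss_function(pos, neg, margin) above is a margin ranking loss over the
# positive and negative scores. A minimal sketch, assuming pos and neg hold
# per-sample distances (smaller = more plausible); the real loss module may
# differ in reduction or signature.
def marginLoss_sketch(pos, neg, margin):
    # hinge on the gap: each negative should score at least `margin`
    # worse (larger distance) than its positive counterpart
    return torch.sum(torch.clamp(pos - neg + margin, min=0.0))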

# Example 3: translational loop with entity/relation norm regularization

            # (reconstructed from the model call below; only the last of these
            # six wrapping lines survived extraction)
            pos_h_batch = autograd.Variable(longTensor(pos_h_batch))
            pos_t_batch = autograd.Variable(longTensor(pos_t_batch))
            pos_r_batch = autograd.Variable(longTensor(pos_r_batch))
            neg_h_batch = autograd.Variable(longTensor(neg_h_batch))
            neg_t_batch = autograd.Variable(longTensor(neg_t_batch))
            neg_r_batch = autograd.Variable(longTensor(neg_r_batch))

            model.zero_grad()
            pos, neg = model(pos_h_batch, pos_t_batch, pos_r_batch,
                             neg_h_batch, neg_t_batch, neg_r_batch)

            if args.loss_type == 0:
                losses = loss_function(pos, neg, margin)
            else:
                losses = loss_function(pos, neg)
            ent_embeddings = model.ent_embeddings(
                torch.cat([pos_h_batch, pos_t_batch, neg_h_batch,
                           neg_t_batch]))
            rel_embeddings = model.rel_embeddings(
                torch.cat([pos_r_batch, neg_r_batch]))
            losses = (losses + loss.normLoss(ent_embeddings) +
                      loss.normLoss(rel_embeddings))

            losses.backward()
            optimizer.step()
            total_loss += losses.data

        agent.append(trainCurve, epoch, total_loss[0])

        if epoch % 10 == 0:
            now_time = time.time()
            print(now_time - start_time)
            print("Train total loss: %d %f" % (epoch, total_loss[0]))

        if epoch % 10 == 0:
            pass  # periodic evaluation block cut off in the source excerpt
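
# Example 4 below re-normalizes the embedding matrices after every optimizer
# step through model.normalize_all_rows() and friends. Those methods are not
# shown in this excerpt; the helper below is a hypothetical sketch of in-place
# L2 row normalization for an nn.Embedding, assuming unit-norm rows are wanted.
def normalize_rows_sketch(embedding):
    # rescale every row of the weight matrix to unit L2 norm, in place,
    # outside of autograd (operates on .data directly)
    w = embedding.weight.data
    norms = w.norm(p=2, dim=1, keepdim=True).clamp_(min=1e-12)
    w.div_(norms)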

# Example 4: variant with switchable norm regularization and post-step
# re-normalization of the embedding matrices

            if config.filter:
                # the margin is applied to every (pos_distance, neg_distance)
                # pair and the hinge terms are summed
                losses = loss_function(pos, neg, margin)
            else:
                losses = loss_function(pos, neg)
            if args.norm_reg_rel == 1:
                # optional norm regularizer over the entity embeddings that
                # appear in this batch
                ent_embeddings = model.ent_embeddings(
                    torch.cat(
                        [pos_h_batch, pos_t_batch, neg_h_batch, neg_t_batch]))
                losses += loss.normLoss(ent_embeddings)
            losses.backward()
            optimizer.step()
            total_loss += losses.data

        # after the optimizer step, re-normalize embedding matrices in place
        if args.transH_test == 0:
            if args.norm_mat == 1:
                model.normalize_all_rows()
            if args.norm_ent == 1:
                model.normalize_ent_embeddings()
            if args.norm_rel == 1:
                model.normalize_vvrel_embeddings()
        else: