Example 1
def run_epoch(net, train_loader, test_loader, CUDA):
    train_acc = 0.0
    train_loss = 0.0
    test_acc = 0.0
    test_loss = 0.0
    epoch_it = 0
    final_it = len(train_loader)
    t0 = time.perf_counter()
    for (x, y) in train_loader:
        x = x.transpose(0, 1).contiguous()
        x, y = tovar(x, CUDA), tovar(y, CUDA)

        net.zero_grad()
        p = net(x)
        y_pred = p[-1, :, :].view(-1)

        loss = nn.SoftMarginLoss()
        val_loss = loss(y_pred, y)

        val_loss.backward()
        net.adam.step()
        with torch.no_grad():
            y_pred = np.array([1. if v > 0 else -1. for v in y_pred])
            train_acc += sum(y_pred == y.cpu().data.numpy()) / y_pred.shape[0]
            train_loss += val_loss.cpu().data.numpy()
        epoch_it += 1
        t1 = time.perf_counter() - t0
        #load_cb(epoch_it, final_it, 'Training', t1)
    train_acc /= epoch_it
    train_loss /= epoch_it

    # Validation step
    epoch_it = 0
    final_it = len(test_loader)
    t0 = time.perf_counter()
    with torch.no_grad():
        for (x, y) in test_loader:
            x = x.transpose(0, 1).contiguous()
            x, y = tovar(x, CUDA), tovar(y, CUDA)
            p = net(x)
            y_pred = p[-1, :, :].view(-1)
            loss = nn.SoftMarginLoss()
            val_loss = loss(y_pred, y)
            test_loss += val_loss.cpu().data.numpy()
            y_pred = np.array([1. if v > 0 else -1. for v in y_pred])
            test_acc += sum(y_pred == y.cpu().data.numpy()) / y_pred.shape[0]
            epoch_it += 1
            t1 = time.perf_counter() - t0
            #load_cb(epoch_it, final_it, 'Testing', t1)
        test_acc /= epoch_it
        test_loss /= epoch_it
    return train_acc, train_loss, test_acc, test_loss
Example 2
 def forward(self, distance_matrix, num_batch_classes):
     num_batch_images = distance_matrix.shape[0]
     num_images_per_class = num_batch_images // num_batch_classes
     # build a same-class mask on the same device/dtype as the distance matrix
     template = torch.zeros_like(distance_matrix)
     for x in range(num_batch_images):
         lo = x // num_images_per_class * num_images_per_class
         hi = lo + num_images_per_class
         for y in range(lo, hi):
             if x != y:
                 template[x, y] = 1
     positive_distance_matrix = distance_matrix.mul(template)
     # print(positive_distance_matrix[:10,:10])
     negative_distance_matrix = distance_matrix - positive_distance_matrix
     # print(negative_distance_matrix[:10,:10])
     positive_distance = torch.amax(positive_distance_matrix, dim=1)
     # print(positive_distance[:10], positive_distance.shape)
     negative_distance, _ = torch.sort(negative_distance_matrix)
     # print(negative_distance[:10, :10])
     negative_distance = negative_distance[:, num_images_per_class]
     # print(negative_distance[:10], negative_distance.shape)
     # target of -1 for every row: the positive distance should stay below the hardest negative
     one = -torch.ones(num_batch_images, device=distance_matrix.device)
     if self.soft_margin:
         soft_margin_loss = nn.SoftMarginLoss()
         loss = soft_margin_loss(positive_distance - negative_distance, one)
     else:
         losses = positive_distance - negative_distance + self.margin
         # print(losses[:10], losses.shape)
         losses = torch.clamp(losses, min=0)
         # print(losses[:10], losses.shape)
         loss = torch.mean(losses)
     return loss
Example 3
 def __init__(self, margin=None, p=2):
     super(TripletLoss, self).__init__()
     self.margin = margin
     if self.margin == 0.0:  # use soft-margin
         self.Loss = nn.SoftMarginLoss()
     else:
         self.Loss = nn.TripletMarginLoss(margin=margin, p=p)
Example 4
    def __init__(self,
                 switchNet,
                 weightNet,
                 apply_f,
                 lr=0.001,
                 alpha=0.001,
                 beta=0.001,
                 max_grad=None,
                 log_name=None,
                 silence=True,
                 mtl=False,
                 max_time=30,
                 n_early_stopping=100,
                 print_every=100,
                 plot=True,
                 weight_decay=0,
                 switch_update_every=1,
                 weight_update_every=1):
        '''
        optimizer: optimization method, default to adam
        alpha: z entropy weight
        beta: y entropy weight
        max_grad: gradient clipping max
        silence: don't output graph and statement
        mtl: multi-task learning
        print_every: print every few iterations, if 0 then don't print
        weight_update_every: update weight every few steps
        switch_update_every: update switch every few steps
        '''
        switchNet = to_cuda(switchNet)
        weightNet = to_cuda(weightNet)

        self.max_time = max_time  # max gpu training time
        self.switchNet = switchNet
        self.switch_size = switchNet.switch_size
        self.weightNet = weightNet
        self.apply_f = apply_f
        self.n_early_stopping = n_early_stopping
        self.print_every = print_every
        self.draw_plot = plot
        self.weight_update_every = weight_update_every
        self.switch_update_every = switch_update_every

        self.mtl = mtl
        self.silence = silence

        self.setLogName(log_name)

        self.optSwitch = torch.optim.Adam(self.switchNet.parameters(),
                                          lr=lr,
                                          weight_decay=weight_decay)
        self.optWeight = torch.optim.Adam(self.weightNet.parameters(),
                                          lr=lr,
                                          weight_decay=weight_decay)
        self.loss = nn.SoftMarginLoss()  # logit loss
        self.elementwise_loss = logit_elementwise_loss
        self.max_grad = max_grad
        self.alpha = alpha
        self.beta = beta
        self.z = None
Example 5
 def __init__(self, margin=None):
   self.margin = margin
   if margin is not None:
     self.ranking_loss = nn.MarginRankingLoss(margin=margin)  
      # ranking loss over similarities: loss(x1, x2, y) = max(0, -y*(x1 - x2) + margin);
      # x1, x2, y are scalars here, and y can only take the values 1 or -1
   else:
     self.ranking_loss = nn.SoftMarginLoss()
Example 6
 def __init__(self, device, margin=None):
     self.margin = margin
     self.device = device
     if margin is not None:
         self.ranking_loss = nn.MarginRankingLoss(margin=margin)
     else:
         self.ranking_loss = nn.SoftMarginLoss()
Example 7
    def __init__(self, *args, **kwargs):
        super(WeightedHolE, self).__init__()
        # self.add_hyperparam('rparam', kwargs.pop('rparam', 0.0))

        self.learning_rate = kwargs.get('lr', _DEF_LEARNING_RATE)
        entity_dim, _, relation_dim = args[0]
        embed_dim = args[1]
        self._max_epochs = kwargs.get('max_epochs', _DEF_MAX_EPOCHS)
        
        init_relations = kwargs.get('init_relations')
        if init_relations is not None:
            self.R = nn.Parameter(init_relations)
        else:
            self.R = nn.Parameter(torch.FloatTensor(relation_dim, embed_dim).uniform_(-.1,.1))
        self.R.my_name = 'R'
        self.R.grad = torch.zeros_like(self.R)
        
        pretrained_ent = kwargs.get('pretrained_entities')
        if pretrained_ent is not None:
            self.E = nn.Parameter(pretrained_ent)
        else:
            self.E = nn.Parameter(torch.FloatTensor(entity_dim, embed_dim).uniform_(-.1,.1))
        self.E.my_name = 'E'
        self.E.grad = torch.zeros_like(self.E)
        
        self.loss_function = nn.SoftMarginLoss(reduction='sum')
        self.optim = Adagrad(list(self.parameters()), lr=self.learning_rate)
Example 8
 def criterion(self,
               size_average=None,
               reduce=None,
               reduction='mean') -> nn.SoftMarginLoss:
     return nn.SoftMarginLoss(size_average=size_average,
                              reduce=reduce,
                              reduction=reduction)
Example 9
    def forward(self, input1, input2, gt=None):
        output1 = self.cnn(input1)
        #print("output1.shape = " + str(output1.shape))

        output2 = self.cnn(input2)
        #print("output2.shape = " + str(output2.shape))

        # define probabilty map dimensions
        b_size = output1.size()[0]
        oH = output1.size()[2] - output2.size()[2] + 1
        oW = output1.size()[3] - output2.size()[3] + 1
        # allocate the probability map on the same device as the feature maps
        output = torch.zeros((b_size, 1, oH, oW), device=output1.device)
        """
        output = F.conv2d(output1, output2)
        """

        # perform cross-correlation operation
        kH, kW = output2.size()[2], output2.size()[3]
        for i in range(oH):
            for j in range(oW):
                output[:, :, i, j] = torch.sum(
                    torch.mul(output1[:, :, i:i + kH, j:j + kW],
                              output2)) / output2.nelement()

        if self.training:
            self.loss = nn.SoftMarginLoss(size_average=True)(output, gt)
            #print(type(self.loss))

        return output
Example 10
 def __init__(self, margin=None):
     super(TripletLoss, self).__init__()
     self.margin = margin
     if self.margin is None:
         self.Loss = nn.SoftMarginLoss()
     else:
         self.Loss = nn.TripletMarginLoss(margin=margin, p=2)
Example 11
 def __init__(self, margin=None, metric="euclidean"):
     self.margin = margin
     self.metric = metric
     if margin is not None:
         self.ranking_loss = nn.MarginRankingLoss(margin=margin)
     else:
         self.ranking_loss = nn.SoftMarginLoss()
Example 12
 def __init__(self, encoder, decoder, discriminator_cnn, discriminator_dense, output_lang, max_length, embedding, input_vocab_size,
              num_layers=1, batch_size=1):
     self.encoder = encoder
     self.decoder = decoder
     self.discriminator_cnn = discriminator_cnn
     self.discriminator_dense = discriminator_dense
     self.output_lang = output_lang
     self.num_layers = num_layers
     self.SOS_token = 1
     self.EOS_token = 2
     self.max_length = max_length
     self.batch_size = batch_size
     self.use_cuda = torch.cuda.is_available()
     self.input_vocab_size = input_vocab_size
     self.output_vocab_size = embedding.shape[0]
     self.embedding_length = embedding.shape[1]
     self.embedding = nn.Embedding(embedding.shape[0], embedding.shape[1])
     self.embedding.weight = nn.Parameter(embedding)
     self.fake_labels = -1*torch.ones(self.batch_size)
     self.true_labels = torch.ones(self.batch_size)
     self.final_criterion = nn.MSELoss()
     self.discriminator_criterion = nn.SoftMarginLoss()
     if self.use_cuda:
         self.embedding = self.embedding.cuda()
         self.fake_labels = self.fake_labels.cuda()
         self.true_labels = self.true_labels.cuda()
Example 13
 def __init__(self, margin=None):
     super(TripletLoss, self).__init__()
     self.margin = margin
     if self.margin is None:  # if no margin assigned, use soft-margin
         self.Loss = nn.SoftMarginLoss()
     else:
         self.Loss = nn.TripletMarginLoss(margin=margin, p=2)
Example 14
 def __init__(self, margin=None, hard_factor=0.0):
     self.margin = margin
     self.hard_factor = hard_factor
     if margin is not None:
         self.ranking_loss = nn.MarginRankingLoss(margin=margin)
     else:
         self.ranking_loss = nn.SoftMarginLoss()
Example 15
 def __init__(self, margin=0, num_instances=0, use_semi=True):
     super(TripletLoss2, self).__init__()
     self.margin = margin
     self.use_semi = use_semi
     #self.ranking_loss = nn.MarginRankingLoss(margin=self.margin)
     self.ranking_loss = nn.SoftMarginLoss().cuda()
     self.K = num_instances
Example 16
 def __init__(self, margin=None):
     self.margin = margin
     if margin is not None:
         self.ranking_loss = nn.MarginRankingLoss(margin=margin)
     else:
         self.ranking_loss = nn.SoftMarginLoss()
     self.bceloss = nn.BCELoss()
Example 17
 def __init__(self, margin=None, mining_method='batch_hard'):
     self.margin = margin
     self.mining_method = mining_method
     if margin is not None and margin > 0:
         self.ranking_loss = nn.MarginRankingLoss(margin=margin)
     else:
         self.ranking_loss = nn.SoftMarginLoss()
Example 18
 def __init__(self, margin=None):
     self.margin = margin
     if margin is not None:
         ## mimics the max(0, ...) operation of the hinge formulation
         self.ranking_loss = nn.MarginRankingLoss(margin=margin)
     else:
         self.ranking_loss = nn.SoftMarginLoss()
Example 19
 def __init__(self, margin=0.3):
     super(TripletLoss, self).__init__()
     self.margin = margin
     if margin == 0.:
         self.ranking_loss = nn.SoftMarginLoss()
     else:
         self.ranking_loss = nn.MarginRankingLoss(margin=margin)
Example 20
 def __init__(self, margin=None):
     self.margin = margin
     #print(margin)
     if margin is not None:
         self.ranking_loss = nn.MarginRankingLoss(margin=margin)
     else:
         self.ranking_loss = nn.SoftMarginLoss()
Example 21
 def __init__(self, margin=1.0):
     super().__init__()
     self.margin = margin
     if margin is not None:
         self.loss = nn.MarginRankingLoss(margin=margin)
     else:
         self.loss = nn.SoftMarginLoss()
Example 22
 def __init__(self, margin=None):
     super(TripletLoss_id, self).__init__()
     self.margin = margin
     if margin is not None:
         self.ranking_loss = nn.MarginRankingLoss(margin=margin)
     else:
         self.ranking_loss = nn.SoftMarginLoss()
Example 23
 def __init__(self, first=nn.CrossEntropyLoss(),
              second=nn.SoftMarginLoss()):
     super(new_loss, self).__init__()
     self.first = first
     self.second = second
     self.fc2 = nn.Linear(2, 1)
     self.sig = nn.Sigmoid()
Example 24
 def __init__(self, margin=None):
     super(MixedLoss, self).__init__()
     self.margin = margin
     if self.margin is None:  # use soft-margin
         self.Loss = nn.SoftMarginLoss()
     else:
         self.Loss = nn.TripletMarginLoss(margin=margin, p=2)
     self.class_loss = nn.CrossEntropyLoss()
Example 25
 def __init__(self, margin):
     super().__init__()
     self.margin = margin
     if self.margin is not None:
         self.ranking_loss = nn.MarginRankingLoss(margin=self.margin,
                                                  reduction='mean')
     else:
         self.ranking_loss = nn.SoftMarginLoss()
Example 26
 def __init__(self, margin=0, use_weight=True):
     super(TripletLoss, self).__init__()
     self.margin = margin
     self.use_weight = use_weight
     self.ranking_loss = nn.MarginRankingLoss(margin=margin, reduce=False) \
         if margin != "soft_margin" else nn.SoftMarginLoss(reduce=False)
     self.softmax = nn.Softmax(dim=1)
     self.softmin = nn.Softmin(dim=1)
Example 27
 def __init__(self, margin=None, normalize_feature=True):
     super(TripletLoss, self).__init__()
     self.margin = margin
     self.normalize_feature = normalize_feature
     if self.margin is not None:
         self.ranking_loss = nn.MarginRankingLoss(margin=margin)
     else:
         self.ranking_loss = nn.SoftMarginLoss()
Example 28
    def __init__(self, alpha=0.2):
        super(ClusterLoss, self).__init__()
        self.alpha = alpha
        self.ranking_loss = nn.SoftMarginLoss()

        self.clusters_sum = []
        self.clusters_count = []
        self.clusters_labels = []
Example 29
 def __init__(self, margin=None, process_dists=False):
     super(TripletLoss, self).__init__()
     self.margin = margin
     self.process_dists = process_dists
     if margin is not None:
         self.ranking_loss = nn.MarginRankingLoss(margin=margin)
     else:
         self.ranking_loss = nn.SoftMarginLoss()
Example 30
 def __init__(self, margin=None, K1=4, K2=4):
     self.margin = margin
     self.K1 = K1
     self.K2 = K2
     if margin is not None:
         self.ranking_loss = nn.MarginRankingLoss(margin=margin)
     else:
         self.ranking_loss = nn.SoftMarginLoss()