Example #1
    def forward(self, input1, input2, y, avg=True):
        # Symmetric KL divergence between the two batches of distributions,
        # each direction scored by a hinge embedding loss against the ±1 labels.
        hingeloss = nn.HingeEmbeddingLoss(
            margin=self.margin,
            reduction='mean' if avg else 'sum')  # size_average is deprecated

        kldiv1 = torch.sum(input1 * torch.log(input1 / input2), 1)
        loss1 = hingeloss(kldiv1, y)

        kldiv2 = torch.sum(input2 * torch.log(input2 / input1), 1)
        loss2 = hingeloss(kldiv2, y)

        return loss1 + loss2
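
For reference, given inputs x and targets y ∈ {1, -1}, nn.HingeEmbeddingLoss computes x when y = 1 and max(0, margin - x) when y = -1. A minimal sketch with made-up distributions, mirroring the symmetric-KL usage above:

import torch
import torch.nn as nn

# Made-up batches of probability distributions and ±1 similarity labels.
p = torch.softmax(torch.randn(4, 10), dim=1)
q = torch.softmax(torch.randn(4, 10), dim=1)
y = torch.tensor([1., -1., 1., -1.])

kl = torch.sum(p * torch.log(p / q), 1)  # per-sample KL(p || q), shape (4,)
# y = 1 pairs are penalized by the KL itself; y = -1 pairs by max(0, margin - KL).
print(nn.HingeEmbeddingLoss(margin=1.0)(kl, y))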
Example #2
def setup(model, opt):

    if opt.criterion == "l1":
        criterion = nn.L1Loss().cuda()
    elif opt.criterion == "mse":
        criterion = nn.MSELoss().cuda()
    elif opt.criterion == "crossentropy":
        criterion = nn.CrossEntropyLoss().cuda()
    elif opt.criterion == "hingeEmbedding":
        criterion = nn.HingeEmbeddingLoss().cuda()
    elif opt.criterion == "tripletmargin":
        criterion = nn.TripletMarginLoss(margin=opt.margin,
                                         swap=opt.anchorswap).cuda()
    else:
        raise ValueError("Unknown criterion: {}".format(opt.criterion))

    parameters = filter(lambda p: p.requires_grad, model.parameters())

    if opt.optimType == 'sgd':
        optimizer = optim.SGD(parameters,
                              lr=opt.lr,
                              momentum=opt.momentum,
                              nesterov=opt.nesterov,
                              weight_decay=opt.weightDecay)
    elif opt.optimType == 'adam':
        optimizer = optim.Adam(parameters,
                               lr=opt.maxlr,
                               weight_decay=opt.weightDecay)
    else:
        raise ValueError("Unknown optimizer: {}".format(opt.optimType))

    if opt.weight_init:
        utils.weights_init(model, opt)

    return model, criterion, optimizer
Example #3
	def forward(self, feature_p, feature_g, identity_p, identity_g, target):
		log_soft = nn.LogSoftmax(1)
		lsoft_p = log_soft(identity_p)
		lsoft_g = log_soft(identity_g)

		dist = nn.PairwiseDistance(p=2)
		pair_dist = dist(feature_p, feature_g)  # Euclidean distance

		# 1. Hinge loss (reduce=False is the deprecated form of reduction='none')
		hing = nn.HingeEmbeddingLoss(margin=self.hinge_margin, reduction='none')
		label0 = torch.tensor(target[0]).type(
			torch.cuda.LongTensor if torch.cuda.is_available() else torch.LongTensor)
		hing_loss = hing(pair_dist, label0)

		# 2. Cross-entropy loss (NLL over log-softmax)
		nll = nn.NLLLoss()
		label1 = torch.tensor([target[1]]).type(
			torch.cuda.LongTensor if torch.cuda.is_available() else torch.LongTensor)
		label2 = torch.tensor([target[2]]).type(
			torch.cuda.LongTensor if torch.cuda.is_available() else torch.LongTensor)
		loss_p = nll(lsoft_p, label1)
		loss_g = nll(lsoft_g, label2)

		# 3. Sum the losses (hing_loss is per-sample, so total_loss is a vector)
		total_loss = hing_loss + loss_p + loss_g
		# mean_loss = torch.mean(total_loss)
		# loss = torch.sum(total_loss)

		return total_loss
Example #4
def train(X, Y, model, args):
    X = torch.FloatTensor(X)
    Y = torch.FloatTensor(Y)
    N = len(Y)

    # Note: this criterion is never called below; the soft-margin SVM hinge
    # loss is computed manually with torch.clamp instead.
    cri = nn.HingeEmbeddingLoss()
    optimizer = optim.SGD(model.parameters(), lr=args.lr)

    model.train()
    for epoch in range(args.epoch):
        perm = torch.randperm(N)
        sum_loss = 0

        for i in range(0, N, args.batchsize):
            x = X[perm[i:i + args.batchsize]].to(args.device)
            y = Y[perm[i:i + args.batchsize]].to(args.device)

            optimizer.zero_grad()
            output = model(x).squeeze()
            weight = model.weight.squeeze()

            loss = torch.mean(torch.clamp(1 - y * output, min=0))  # hinge loss
            loss += args.c * (weight.t() @ weight) / 2.0           # L2 regularization

            loss.backward()
            optimizer.step()

            sum_loss += float(loss)

        print("Epoch: {:4d}\tloss: {}".format(epoch, sum_loss / N))
Example #5
    def __init__(self, args, vocabs):
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.args = args
        type_vocab = vocabs[Constants.TYPE_VOCAB]
        self.coarse_ids = type_vocab.get_coarse_ids()
        self.fine_ids = type_vocab.get_fine_ids()
        self.ultrafine_ids = type_vocab.get_ultrafine_ids()
        self.ids = [self.coarse_ids, self.fine_ids, self.ultrafine_ids]

        super(Model, self).__init__()
        self.word_lut = nn.Embedding(vocabs[Constants.TOKEN_VOCAB].size_of_word2vecs(), args.emb_size,
                                     padding_idx=Constants.PAD)
        self.type_lut = nn.Embedding(vocabs[Constants.TYPE_VOCAB].size(), args.type_dims)

        self.mention_encoder = MentionEncoder(vocabs[Constants.CHAR_VOCAB], args)
        self.context_encoder = ContextEncoder(args)
        self.feature_len = args.context_rnn_size * 2 + args.emb_size + args.char_emb_size

        self.coarse_projector = Projector(args, self.feature_len)
        self.fine_projector = Projector(args, self.feature_len + args.type_dims)
        self.ultrafine_projector = Projector(args, self.feature_len + args.type_dims)

        self.hyperbolic = args.metric == "hyperbolic"
        self.distance_function = PoincareDistance.apply if self.hyperbolic else nn.PairwiseDistance()
        self.cos_sim_function = nn.CosineSimilarity()
        self.hinge_loss_function = nn.HingeEmbeddingLoss()
Example #6
 def __init__(self, loadNumber, modelName):
     self.d_model = Discriminator()
     self.g_model = Generator()
     self.loadNumber = loadNumber
     self.modelName = modelName
     if loadNumber != -1:
         loadDirectory = func.getLoadDirectory()
         self.d_model.load_state_dict(
             torch.load(loadDirectory + self.modelName +
                        '_discriminator{}.pkl'.format(loadNumber),
                        map_location=lambda storage, loc: storage))
         self.g_model.load_state_dict(
             torch.load(loadDirectory + self.modelName +
                        '_generator{}.pkl'.format(loadNumber),
                        map_location=lambda storage, loc: storage))
         func.print_debug("{}'th Model Data Loaded".format(loadNumber))
     self.criterion_MSE = nn.MSELoss()
     self.criterion_BCE = nn.BCELoss()
     self.criterion_HEL = nn.HingeEmbeddingLoss()
     self.d_optimizer = torch.optim.Adam(self.d_model.parameters(),
                                         lr=env.GAN_LEARNING_RATE,
                                         betas=(0.5, 0.999))
     self.g_optimizer = torch.optim.Adam(self.g_model.parameters(),
                                         lr=env.GAN_LEARNING_RATE,
                                         betas=(0.5, 0.999))
     self.d_model = func.cuda(self.d_model)
     self.g_model = func.cuda(self.g_model)
     if loadNumber == -1:
         self.d_model.apply(self.weights_init)
         self.g_model.apply(self.weights_init)
Example #7
    def __init__(self, config):
        '''
        seqlen = 16
        person_num = 150
        rnn_type = 'RNN'
        learning_rate = 0.001
        lr_decay_epoch = 300
        cuda = True
        '''

        self.config = config
        self.config['cuda'] = torch.cuda.is_available() and self.config['cuda']

        self.classify_loss = nn.NLLLoss()
        self.hinge_loss = nn.HingeEmbeddingLoss(self.config['margin'])
        self.cos_loss = nn.CosineEmbeddingLoss(0.1)

        self.model = full_model(self.config)
        if self.config['cuda']:
            self.model.cuda()

        self.optimizer = optim.SGD(self.model.parameters(),
                                   lr=self.config['learning_rate'],
                                   momentum=0.9)
        # self.optimizer = optim.Adam(self.model.parameters(), lr=self.config['learning_rate'])

        self.FloatTensor = torch.cuda.FloatTensor if self.config[
            'cuda'] else torch.Tensor
        self.LongTensor = torch.cuda.LongTensor if self.config[
            'cuda'] else torch.LongTensor
Example #8
 def __init__(self, margin=0, loss='hinge'):
     super(TupletLoss, self).__init__()
     self.margin = margin
     self.loss = loss
     if loss == 'hinge':
         self.loss_f = nn.HingeEmbeddingLoss(margin)
     elif loss == 'bce':
         self.loss_f = nn.BCELoss()
     else:
         raise ValueError("Unsupported loss: {}".format(loss))
Example #9
    def forward(self, positive, negative_1):
        # One row of scores: the positive score first, then the negatives.
        scores = torch.cat([positive, negative_1], dim=-1)
        # Target is +1 for the first (positive) entry and -1 for all negatives.
        truth = -torch.ones(1, positive.shape[1] + negative_1.shape[1],
                            device=scores.device)
        truth[0, 0] = 1

        return nn.HingeEmbeddingLoss(margin=5)(scores, truth)
Example #10
 def criterion(self,
               margin=1.0,
               size_average=None,
               reduce=None,
               reduction='mean') -> nn.HingeEmbeddingLoss:
     return nn.HingeEmbeddingLoss(margin=margin,
                                  size_average=size_average,
                                  reduce=reduce,
                                  reduction=reduction)
Example #11
    def __init__(self, args, word_embeddings: TextFieldEmbedder,
                 encoder: Seq2VecEncoder, relation_nums: int, d1: int, d2: int,
                 vocab, er_vocab):
        super().__init__(vocab)
        self.args = args
        self.encoder = encoder  # DefinitionSentenceEncoder
        self.R = torch.nn.Embedding(relation_nums, d2)  # .to('cuda:2')

        if torch.cuda.is_available():
            self.R.cuda()

        # Relation-specific bilinear tensor W, placed on GPU when available.
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.W = torch.nn.Parameter(
            torch.tensor(np.random.uniform(-1, 1, (d2, d1, d1)),
                         dtype=torch.float,
                         device=device,
                         requires_grad=True))  # .to('cuda:3')

        self.input_dropout = torch.nn.Dropout(self.args.input_dropout)
        self.hidden_dropout1 = torch.nn.Dropout(self.args.hidden_dropout1)
        self.hidden_dropout2 = torch.nn.Dropout(self.args.hidden_dropout2)
        # self.loss = torch.nn.BCELoss()

        self.loss_BCE = torch.nn.BCELoss()
        self.loss_Marginranking = nn.MarginRankingLoss(
            margin=self.args.margin_for_marginrankloss)
        # https://github.com/TanyaZhao/groupchat_pytorch/blob/4883638769abddc8030d234ec59f01a2d29907e9/class_loss.py

        self.bn0 = torch.nn.BatchNorm1d(d1)
        self.bn1 = torch.nn.BatchNorm1d(d1)

        self.def2kgdim = torch.nn.Linear(self.encoder.get_output_dim(), d1)
        self.accuracy = CategoricalAccuracy()

        self.loss_hinge = nn.HingeEmbeddingLoss(
            margin=self.args.margin_for_hingeemb)

        self.R_proj2worddim_for_relatt = torch.nn.Linear(
            d2, self.encoder.get_output_dim())
        self.conv = nn.Conv2d(in_channels=relation_nums,
                              out_channels=relation_nums,
                              kernel_size=(self.args.ngram - 1, 1),
                              padding=((self.args.ngram - 1) // 2, 0))

        self.att_conv = nn.Conv1d(relation_nums,
                                  relation_nums,
                                  kernel_size=(self.args.ngram - 1, 1),
                                  padding=((self.args.ngram - 1) // 2, 0))
        self.er_vocab = er_vocab
        self.evaluate_flag = 0
Example #12
 def __init__(self):
     self.activations = {
         'sigmoid': nn.Sigmoid(),
         'relu': nn.ReLU(),
         'relu6': nn.ReLU6(),
         'rrelu0103': nn.RReLU(0.1, 0.3),
         'rrelu0205': nn.RReLU(0.2, 0.5),
         'htang1': nn.Hardtanh(-1, 1),
         'htang2': nn.Hardtanh(-2, 2),
         'htang3': nn.Hardtanh(-3, 3),
         'tanh': nn.Tanh(),
         'elu': nn.ELU(),
         'selu': nn.SELU(),
         'hardshrink': nn.Hardshrink(),
         'leakyrelu01': nn.LeakyReLU(0.1),
         'leakyrelu001': nn.LeakyReLU(0.01),
         'logsigmoid': nn.LogSigmoid(),
         'prelu': nn.PReLU(),
     }
     self.loss_functions = {
         'binary_cross_entropy': nn.BCELoss(),
         'binary_cross_entropy_with_logits': nn.BCEWithLogitsLoss(),
         'poisson_nll_loss': nn.PoissonNLLLoss(),
         # 'cosine_embedding_loss': nn.CosineEmbeddingLoss(),
         # 'cross_entropy': nn.CrossEntropyLoss(),
         # 'ctc_loss': nn.CTCLoss(),
         'hinge_embedding_loss': nn.HingeEmbeddingLoss(),
         'kl_div': nn.KLDivLoss(),
         'l1_loss': nn.L1Loss(),
         'mse_loss': nn.MSELoss(),
         # 'margin_ranking_loss': nn.MarginRankingLoss(),
         # 'multilabel_margin_loss': nn.MultiLabelMarginLoss(),
         'multilabel_soft_margin_loss': nn.MultiLabelSoftMarginLoss(),
         # 'multi_margin_loss': nn.MultiMarginLoss(),
         # 'nll_loss': nn.NLLLoss(),
         'smooth_l1_loss': nn.SmoothL1Loss(),
         'soft_margin_loss': nn.SoftMarginLoss(),
         # 'triplet_margin_loss': nn.TripletMarginLoss(),
     }
     self.learning_rate = 2.8
     self.momentum = 0.8
     self.hidden_size = 10
     self.activation_hidden = 'relu'
     self.loss_function = 'binary_cross_entropy'
     self.sols = {}
     self.solsSum = {}
     self.random = 3
     self.random_grid = [_ for _ in range(10)]
     # self.hidden_size_grid = [20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39]
     # self.hidden_size_grid = [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
     # self.learning_rate_grid = [0.1, 1.0, 2.0, 3.0, 5.0]
     # self.activation_hidden_grid = list(self.activations.keys())
     # self.loss_function_grid = list(self.loss_functions.keys())
     self.grid_search = GridSearch(self)
     self.grid_search.set_enabled(False)
Example #13
def get_fm_loss(real_feats, fake_feats):
    # Feature-matching loss: squared difference of the batch-mean features.
    # With all-ones targets, HingeEmbeddingLoss reduces to the input itself,
    # so this simply accumulates the squared differences.
    criterion = nn.HingeEmbeddingLoss()
    losses = 0
    for real_feat, fake_feat in zip(real_feats, fake_feats):
        l2 = (real_feat.mean(0) - fake_feat.mean(0)) ** 2
        # Variable is deprecated; a plain tensor on the right device suffices.
        loss = criterion(l2, torch.ones(l2.size(), device=l2.device))
        losses += loss

    return losses
Example #14
 def __init__(self,
              batch_size,
              device,
              alpha=0.5,
              criterion=nn.HingeEmbeddingLoss(margin=-1)):
     super().__init__()
     self.criterion = criterion
     self.device = device
     self.batch_size = batch_size
     self.alpha = alpha
Example #15
 def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):
     super(GANloss, self).__init__()
     self.register_buffer('real_label', torch.tensor(target_real_label))
     self.register_buffer('fake_label', torch.tensor(target_fake_label))
     if gan_mode == "lsgan":
         self.loss = nn.MSELoss()
     elif gan_mode == "vanilla":
         self.loss = nn.BCEWithLogitsLoss()
     elif gan_mode == "discriminator":
         self.loss = nn.HingeEmbeddingLoss(10)
     else:
         raise NotImplementedError("gan_mode {} not implemented".format(gan_mode))
Example #16
 def make_criterion(self):
     if self.loss_name == "mse":
         self.criterion = nn.MSELoss()
     elif self.loss_name == "mae":
         self.criterion = self.mean_absolute_loss
     elif self.loss_name == 'hinge':
         self.criterion = nn.HingeEmbeddingLoss()
     elif self.loss_name == 'bce':
         self.criterion = nn.BCEWithLogitsLoss()
     else:
         raise NotImplementedError
Example #17
def train(net, train_dataset, batchsize, l2=0.2, hinge=False):
    net.train()

    ##############################################################################
    # Choose Optimizer
    ##############################################################################
    optimizer = optim.SGD(net.parameters(), lr=1e-3, weight_decay=l2)
    # If the loss is hinge loss (note: the hinge is ultimately computed
    # manually in the loop below, so this criterion only serves the BCE branch)
    if hinge:
        criterion = nn.HingeEmbeddingLoss()
    else:
        criterion = nn.BCEWithLogitsLoss()

    ##############################################################################
    # Iterate for each epoch
    ##############################################################################
    for epoch in range(max_epochs):
        sum_loss = 0
        train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                                   batch_size=batchsize,
                                                   shuffle=True)
        for i, (images, labels) in enumerate(train_loader):
            images = torch.unsqueeze(images, 1)
            labels = torch.unsqueeze(labels, 1)
            labels = labels.float()

            data = Variable(images)
            target = Variable(labels)
            # Hinge loss expects targets in {-1, +1} rather than {0, 1}
            if hinge:
                target[target == 0] = -1

            ####### Optimize
            optimizer.zero_grad()
            ####### Calculate output
            output = net(data)
            ####### Calculate and update based on loss
            if hinge:
                # max(0, 1 - target*output)
                loss = torch.mean(torch.clamp(1 - target * output, min=0))
            else:
                loss = criterion(output, target)

            loss.backward()
            optimizer.step()

            ####### Sum loss
            sum_loss += loss.item()
        if verbose:
            print('Batchsize %d Training Epoch [%d/%d], Loss: %.4f' %
                  (batchsize, epoch + 1, max_epochs, sum_loss))
Example #18
def make_loss(cfg):
    if cfg.LOSS.L1_TYPE == 'L1+perL1':
        L1_loss = L1_plus_perceptualLoss(
            lambda_L1=cfg.LOSS.LAMBDA_L1,
            lambda_perceptual=cfg.LOSS.LAMBDA_PER,
            perceptual_layers=cfg.LOSS.NUM_LAYERS_VGG,
            percep_is_l1=1)
    elif cfg.LOSS.L1_TYPE == 'L1':
        # An nn.Module cannot be scaled by a float directly; apply the
        # weight at call time instead.
        l1 = nn.L1Loss()
        L1_loss = lambda x, y: cfg.LOSS.LAMBDA_L1 * l1(x, y)
    else:
        raise ValueError("Unknown L1 type: {}".format(cfg.LOSS.L1_TYPE))
    GAN_Loss = GANLoss()
    # size_average/reduce are deprecated; reduction='mean' covers both.
    ReID_Loss = nn.HingeEmbeddingLoss(margin=1, reduction='mean')
    return GAN_Loss, L1_loss, ReID_Loss
Example #19
    def create_inner_prod_link_predictor(self):
        self.margin = -0.2
        self.target_embedder.set_margin(self.margin)
        self.link_predictor = CosineLinkPredictor(margin=self.margin).to(self.device)
        self.hinge_loss = nn.HingeEmbeddingLoss(margin=1. - self.margin)

        def cosine_loss(x1, x2, label):
            sim = nn.CosineSimilarity()
            dist = 1. - sim(x1, x2)
            return self.hinge_loss(dist, label)

        # self.cosine_loss = CosineEmbeddingLoss(margin=self.margin)
        self.cosine_loss = cosine_loss
        self.positive_label = 1.
        self.negative_label = -1.
        self.label_dtype = torch.float32
Example #20
    def _init_nn(self):
        """Initialize the nn model for training."""
        self.dict_args = {'data_type': self.data_type,
                          'feature_dim': self.feature_dim,
                          'user_embdim': self.u_embdim,
                          'user_count': self.train_data.n_users,
                          'bn_momentum': self.bn_momentum,
                          'dropout': self.dropout,
                          'model_type': self.model_type,
                          'word_embdim': self.word_embdim,
                          'word_embeddings': self.word_embeddings,
                          'hidden_size': self.hidden_size,
                          'dropout_rnn': self.dropout_rnn,
                          'vocab_size': self.vocab_size,
                          'attention': self.attention,
                          'batch_size': self.batch_size}

        self.model = DCUELMNet(self.dict_args)

        if self.freeze_conv:
            for param in self.model.conv.parameters():
                param.requires_grad = False

        self.loss_func = nn.HingeEmbeddingLoss(margin=self.margin)
        self.side_loss_func = nn.NLLLoss()
        if self.optimize == 'adam':
            self.optimizer = optim.Adam(
                self.model.parameters(), self.lr,
                (self.beta_one, self.beta_two),
                self.eps, self.weight_decay)
        elif self.optimize == 'sgd':
            self.optimizer = optim.SGD(
                self.model.parameters(), self.lr, self.beta_one,
                weight_decay=self.weight_decay, nesterov=True)
        elif self.optimize == 'swats':
            self.optimizer = Swats(
                self.model.parameters(), self.lr,
                (self.beta_one, self.beta_two),
                self.eps, self.weight_decay)

        self.scheduler = MultiStepLR(
            self.optimizer, milestones=[4, 9], gamma=0.1)

        if self.USE_CUDA:
            self.model = self.model.cuda()
            self.loss_func = self.loss_func.cuda()
            self.side_loss_func = self.side_loss_func.cuda()
Example #21
    def __init__(self, args):
        super(SNRM, self).__init__()
        self.update_lr = args.learning_rate
        self.dropout_r = args.dropout_parameter
        self.regularization = args.regularization_term
        self.emb_dim = args.emb_dim
        self.conv1_ch = args.conv1_channel
        self.conv2_ch = args.conv2_channel
        self.conv3_ch = args.conv3_channel

        ## make network
        self.features = self._make_layers()

        ## hinge loss
        self.loss = nn.HingeEmbeddingLoss()

        ## optimizer
        self.optimizer = optim.Adam(self.parameters(), lr=args.learning_rate)
Example #22
    def forward(self,
                input_ids=None,
                attention_mask=None,
                labels=None,
                pos_weight=None):
        bert_outputs = self.bert(input_ids, attention_mask=attention_mask)

        pooled_output = bert_outputs[0][:, 0]
        pooled_output = self.dropout(
            pooled_output)  # shape (batch_size, hidden_size)
        pooled_output = nn.GELU()(self.preclass1(pooled_output))
        pooled_output = self.dropout(
            pooled_output)  # shape (batch_size, hidden_size)
        pooled_output = nn.GELU()(self.preclass2(pooled_output))
        cosine = self.cosine(
            self.embedding.unsqueeze(dim=0).repeat(pooled_output.size()[0], 1,
                                                   1),
            pooled_output.unsqueeze(1).repeat(1, self.num_labels, 1))
        if self.agents_extended > 0:
            ext_cosine = self.cosine(
                self.extend_embedding.unsqueeze(dim=0).repeat(
                    pooled_output.size()[0], 1, 1),
                pooled_output.unsqueeze(1).repeat(1, self.agents_extended, 1))
            cosine = torch.cat((cosine, ext_cosine), dim=1)
        outputs = (cosine, )

        if labels is not None:
            # against class imbalances
            if pos_weight is None:
                pos_weight = torch.ones(cosine.size()[1]).float()
            pos_weight = torch.clamp(
                pos_weight.repeat(cosine.size()[0], 1) * labels, 1, 1000)
            loss_fct = nn.HingeEmbeddingLoss(reduction="none")
            cos_dist = 1 - cosine
            labels = labels * 2 - 1  # transform to -1, 1 labels
            hinges = torch.stack([
                loss_fct(cos_dist[:, i], labels[:, i])
                for i in range(cos_dist.size()[1])
            ], dim=1)  # (batch_size, num_labels), aligned with pos_weight
            loss = torch.mean(pos_weight * hinges)
            outputs = outputs + (loss, )

        return outputs  # sigmoid(logits), (loss)
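
For intuition, here is a shape-level sketch of the labeling scheme above, with made-up cosine similarities: the {0, 1} labels become {-1, +1} targets, and the per-label hinge is applied to the cosine distance 1 - cos:

import torch
import torch.nn as nn

cosine = torch.rand(2, 3)                        # (batch_size, num_labels) similarities
labels = torch.tensor([[1., 0., 0.], [0., 1., 1.]])

cos_dist = 1 - cosine
targets = labels * 2 - 1                         # {0, 1} -> {-1, +1}
loss_fct = nn.HingeEmbeddingLoss(reduction="none")
hinges = torch.stack([loss_fct(cos_dist[:, i], targets[:, i])
                      for i in range(cos_dist.size(1))], dim=1)
print(hinges.shape)                              # torch.Size([2, 3])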
Example #23
def compute_fm_loss(real_feats, fake_feats, criterion='HingeEmbeddingLoss', cuda=False):
    '''Compute the distance between real_feats and fake_feats, instead of an L1 loss.
    ...forked from [discogan](https://github.com/SKTBrain/DiscoGAN/blob/master/discogan/image_translation.py)'s
    ...get_fm_loss

    @Params:
    - real_feats: the real images' features, **not the last output of netD but the hidden layers' outputs**
    - fake_feats: same as above, but from the fake images
    - criterion: criterion type, default is `HingeEmbeddingLoss`
    '''
    if criterion == 'HingeEmbeddingLoss':
        criterion = nn.HingeEmbeddingLoss()
    losses = 0
    for real_feat, fake_feat in zip(real_feats, fake_feats):
        # Squared difference of the mean features; with all-ones targets the
        # hinge loss reduces to the input itself.
        l2 = (real_feat.mean() - fake_feat.mean()) ** 2
        target = torch.ones(l2.size())  # Variable is deprecated
        if cuda:
            target = target.cuda()
        loss = criterion(l2, target)
        losses += loss
    return losses
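
A minimal way to exercise this helper, with hypothetical per-layer tensors standing in for the discriminator's hidden activations (the shapes here are made up):

import torch

real_feats = [torch.randn(8, 64), torch.randn(8, 128)]
fake_feats = [torch.randn(8, 64), torch.randn(8, 128)]

print(compute_fm_loss(real_feats, fake_feats))  # a 0-dim tensor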
Example #24
    def forward(self, feature_p, feature_g, identity_p, identity_g, target):
        dist = nn.PairwiseDistance(p=2)
        pair_dist = dist(feature_p, feature_g)  # Euclidean distance

        # 1. Hinge loss (reduce=False is the deprecated form of reduction='none')
        hing = nn.HingeEmbeddingLoss(margin=self.hinge_margin, reduction='none')
        label0 = target[0].to(self.device)
        hing_loss = hing(pair_dist, label0)

        # 2. Cross-entropy loss
        nll = nn.CrossEntropyLoss()
        label1 = target[1].to(self.device)
        label2 = target[2].to(self.device)
        loss_p = nll(identity_p, label1)
        loss_g = nll(identity_g, label2)

        # 3. Sum the losses and reduce to a scalar
        total_loss = hing_loss + loss_p + loss_g
        mean_loss = torch.mean(total_loss)

        return mean_loss
Example #25
    def _init_nn(self):
        """Initialize the nn model for training."""
        self.dict_args = {
            'data_type': self.data_type,
            'feature_dim': self.feature_dim,
            'user_embdim': self.u_embdim,
            'user_count': self.train_data.n_users,
            'bn_momentum': self.bn_momentum,
            'model_type': self.model_type
        }

        self.model = DCUENet(self.dict_args)

        self.loss_func = nn.HingeEmbeddingLoss(margin=self.margin)
        self.optimizer = optim.Adam(self.model.parameters(), self.lr,
                                    (self.beta_one, self.beta_two), self.eps,
                                    self.weight_decay)

        if self.USE_CUDA:
            self.model = self.model.cuda()
            self.loss_func = self.loss_func.cuda()
Example #26
    def __init__(self, model, learning_rate, optimizer=None, criterion=None):

        # Select device
        if torch.cuda.is_available():
            self.device = torch.device("cuda:0")
        else:
            self.device = torch.device("cpu")

        # Initialize model
        self.model = model.to(self.device)

        # Select optimizer
        if optimizer == 'sgd':
            self.optimizer = optim.SGD(model.parameters(), lr=learning_rate)
        elif optimizer == 'adam':
            self.optimizer = optim.Adam(model.parameters(), lr=learning_rate)
        elif optimizer == 'adagrad':
            self.optimizer = optim.Adagrad(model.parameters(),
                                           lr=learning_rate)
        elif optimizer == 'rmsprop':
            self.optimizer = optim.RMSprop(model.parameters(),
                                           lr=learning_rate)
        else:
            logging.error('Incorrect optimizer value')
            return

        # Select loss function
        if criterion == 'crossentropyloss':
            self.criterion = nn.CrossEntropyLoss()
        elif criterion == 'hingeembeddingloss':
            self.criterion = nn.HingeEmbeddingLoss()
        else:
            logging.error('Incorrect loss function')
            return

        # create tensorboard output
        self.experiment_name = datetime.now().strftime("%Y%m%d_%H%M%S")
        self.writer = SummaryWriter("logs/" + self.experiment_name)
Example #27
 def __init__(self,
              margin=0.3,
              num_points=250,
              border_ic=6,
              env_points=200,
              category=False,
              outputD=64,
              v23=False,
              alpha=0.5,
              freeze=False,
              residual=False):
     super().__init__()
     torch.autograd.set_detect_anomaly(True)
     print('TransformerTrackerEmb')
     self.point_feat = PoseNetFeatOffsetEmb(num_points=num_points,
                                            ic=3,
                                            border_points=env_points,
                                            border_ic=border_ic,
                                            output_dim=outputD,
                                            category=True)
     self.num_points = num_points
     self.margin = margin
     self.ranking_loss = nn.MarginRankingLoss(margin=margin)
     #self.cos_emb_loss = nn.CosineEmbeddingLoss(margin=margin)
     self.hinge_loss = nn.HingeEmbeddingLoss(margin=margin)
     self.embedding = LocationEmbedding
     self.transformer_model = TransformerModel(outputD,
                                               2,
                                               200,
                                               2,
                                               posenc_max_len=5000)
     self.outputD = outputD
     self.alpha = alpha
     self.freeze = freeze
     self.residual = residual
     if (self.freeze):
         for param in self.point_feat.parameters():
             param.requires_grad = False
Example #28
    def _init_nn(self):
        """Initialize the nn model for training."""
        self.dict_args = {
            'data_type': self.data_type,
            'feature_dim': self.feature_dim,
            'user_embdim': self.u_embdim,
            'user_count': self.train_data.n_users,
            'bn_momentum': self.bn_momentum,
            'dropout': self.dropout,
            'model_type': self.model_type
        }

        self.model = DCUENet(self.dict_args)

        self.loss_func = nn.HingeEmbeddingLoss(margin=self.margin)
        if self.optimize == 'adam':
            self.optimizer = optim.Adam(self.model.parameters(), self.lr,
                                        (self.beta_one, self.beta_two),
                                        self.eps, self.weight_decay)
        elif self.optimize == 'sgd':
            self.optimizer = optim.SGD(self.model.parameters(),
                                       self.lr,
                                       self.beta_one,
                                       weight_decay=self.weight_decay,
                                       nesterov=True)
        elif self.optimize == 'swats':
            self.optimizer = Swats(self.model.parameters(), self.lr,
                                   (self.beta_one, self.beta_two), self.eps,
                                   self.weight_decay)

        self.scheduler = MultiStepLR(self.optimizer,
                                     milestones=[4, 9],
                                     gamma=0.1)

        if self.USE_CUDA:
            self.model = self.model.cuda()
            self.loss_func = self.loss_func.cuda()
Example #29
    def __init__(self, config):
        '''
        seqlen = 16
        person_num = 150
        rnn_type = 'RNN'
        learning_rate = 0.001
        lr_decay_epoch = 300
        cuda = True
        '''

        self.config = config
        self.config['cuda'] = torch.cuda.is_available() and self.config['cuda']

        self.classify_loss = nn.NLLLoss()
        self.hinge_loss = nn.HingeEmbeddingLoss(self.config['margin'])
        self.cos_loss = nn.CosineEmbeddingLoss()

        self.model = model(seqlen=self.config['max_seqlen'],
                           person_num=self.config['person_num'],
                           rnn_type=self.config['rnn_type'])
        if self.config['load_cnn']:
            self.model.load_state_dict(torch.load(
                self.config['save_cnn_path']))
            self.model.train(True)
        if self.config['cuda']:
            self.model.cuda()

        self.optimizer = optim.SGD(self.model.parameters(),
                                   lr=self.config['learning_rate'],
                                   momentum=0.9)
        # self.optimizer = optim.Adam(self.model.parameters(), lr=self.config['learning_rate'])

        self.FloatTensor = torch.cuda.FloatTensor if self.config[
            'cuda'] else torch.Tensor
        self.LongTensor = torch.cuda.LongTensor if self.config[
            'cuda'] else torch.LongTensor
Example #30
def parse_loss(loss):
    loss, kwargs = parse_str(loss)

    if loss == 'l1': return nn.L1Loss(**kwargs)
    if loss == 'mse': return nn.MSELoss(**kwargs)
    if loss == 'cross_entropy': return nn.CrossEntropyLoss(**kwargs)
    if loss == 'nll': return nn.NLLLoss(**kwargs)
    if loss == 'poisson': return nn.PoissonNLLLoss(**kwargs)
    if loss == 'nll2d': return nn.NLLLoss2d(**kwargs)
    if loss == 'kl_div': return nn.KLDivLoss(**kwargs)
    if loss == 'bce': return nn.BCELoss(**kwargs)
    if loss == 'bce_with_logits': return nn.BCEWithLogitsLoss(**kwargs)
    if loss == 'margin_ranking': return nn.MarginRankingLoss(**kwargs)
    if loss == 'hinge_embedding': return nn.HingeEmbeddingLoss(**kwargs)
    if loss == 'multilabel_margin': return nn.MultiLabelMarginLoss(**kwargs)
    if loss == 'smooth_l1': return nn.SmoothL1Loss(**kwargs)
    if loss == 'multilabel_softmargin':
        return nn.MultiLabelSoftMarginLoss(**kwargs)
    if loss == 'cosine_embedding': return nn.CosineEmbeddingLoss(**kwargs)
    if loss == 'multi_margin': return nn.MultiMarginLoss(**kwargs)
    if loss == 'triplet_margin': return nn.TripletMarginLoss(**kwargs)

    # Fall back to a functional loss; pass input and target through together.
    loss_fn = getattr(nn.functional, loss)
    return lambda *args: loss_fn(*args, **kwargs)
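
Assuming parse_str('hinge_embedding') returns the pair ('hinge_embedding', {}) (parse_str itself is not shown here), the factory could be exercised like this:

import torch

criterion = parse_loss('hinge_embedding')
distances = torch.randn(8).abs()                      # e.g. embedding distances
targets = torch.randint(0, 2, (8,)).float() * 2 - 1   # labels in {-1, +1}
print(criterion(distances, targets))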