def get_loss(global_feat, local_feat, results, labels):
    triple_loss = global_loss(TripletLoss(margin=0.2), global_feat, labels)[0] + \
                  local_loss(TripletLoss(margin=0.2), local_feat, labels)[0]
    # `c` is assumed to be a cross-entropy criterion defined elsewhere in the script
    celoss = c(results, labels)

    return triple_loss + celoss, triple_loss.item(), celoss.item()
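The `global_loss` and `local_loss` helpers used above come from an AlignedReID-style codebase; they are assumed to mine the hardest positive and negative for each anchor within the batch and to return the loss as the first element of a tuple (hence the `[0]`). A minimal sketch of that idea, with all names and signatures assumed:

import torch
import torch.nn.functional as F

def global_loss(tri_loss, feat, labels, normalize_feature=False):
    # Hypothetical batch-hard mining sketch; the real helper also returns
    # mined indices and the distance matrix, which is why callers take [0].
    if normalize_feature:
        feat = F.normalize(feat, dim=1)
    dist = torch.cdist(feat, feat)                        # pairwise L2 distances
    same = labels.unsqueeze(0) == labels.unsqueeze(1)     # same-label mask
    dist_ap = (dist * same.float()).max(dim=1).values     # hardest positive
    dist_an = (dist + same.float() * 1e12).min(dim=1).values  # hardest negative
    loss = F.relu(dist_ap - dist_an + tri_loss.margin).mean()  # assumes a .margin attribute
    return loss, dist_ap, dist_an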
Example #2
    def __init__(self, model_path, alpha):
        """
        Class constructor
        :param model_path: is the path to the base face
         verification embedding model
        :param alpha: is the alpha to use for the triplet
         loss calculation
        """
        with tf.keras.utils.CustomObjectScope({'tf': tf}):
            self.base_model = tf.keras.models.load_model(model_path)

        A = tf.keras.Input(shape=(96, 96, 3))
        P = tf.keras.Input(shape=(96, 96, 3))
        N = tf.keras.Input(shape=(96, 96, 3))

        net_01 = self.base_model(A)
        net_02 = self.base_model(P)
        net_03 = self.base_model(N)

        loss = TripletLoss(alpha)

        mix = [net_01, net_02, net_03]
        out = loss(mix)
        my_model = tf.keras.models.Model([A, P, N], out)
        my_model.compile(optimizer='adam')
        self.training_model = my_model
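Most of the Keras examples on this page assume a `TripletLoss` layer imported from `triplet_loss`. A minimal sketch of such a layer, consistent with how these snippets use it (a constructor taking `alpha`, callable on `[A, P, N]`, and exposing a `triplet_loss` method as in Examples #18 and #23); the real module may differ:

import tensorflow as tf

class TripletLoss(tf.keras.layers.Layer):
    def __init__(self, alpha, **kwargs):
        self.alpha = alpha
        super().__init__(**kwargs)

    def triplet_loss(self, inputs):
        anchor, positive, negative = inputs
        # squared L2 distances anchor-positive and anchor-negative
        pos_dist = tf.reduce_sum(tf.square(anchor - positive), axis=-1)
        neg_dist = tf.reduce_sum(tf.square(anchor - negative), axis=-1)
        # hinge: only triplets violating the margin contribute
        return tf.maximum(pos_dist - neg_dist + self.alpha, 0.0)

    def call(self, inputs):
        loss = self.triplet_loss(inputs)
        self.add_loss(loss)
        return loss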
Example #3
    def __init__(self, model_path, alpha):
        """
        class constructor
        :param model_path: is the path to the base face verification
        embedding model
        :param alpha: is the alpha to use for the triplet loss calculation
        """
        with tf.keras.utils.CustomObjectScope({'tf': tf}):
            self.base_model = K.models.load_model(model_path)

        A = K.Input(shape=(96, 96, 3))
        P = K.Input(shape=(96, 96, 3))
        N = K.Input(shape=(96, 96, 3))

        inputs = [A, P, N]

        output_A = self.base_model(A)
        output_P = self.base_model(P)
        output_N = self.base_model(N)

        output0 = [output_A, output_P, output_N]
        loss = TripletLoss(alpha)
        outputs = loss(output0)

        model = K.models.Model(inputs, outputs)
        model.compile(optimizer="Adam")

        self.training_model = model
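A hypothetical way to exercise one of these constructors; the class name `TrainModel`, the module name, and the model path below are all illustrative, not taken from the examples:

from train_model import TrainModel  # hypothetical module/class names

tm = TrainModel('models/face_verification.h5', alpha=0.2)
tm.base_model.summary()       # the loaded embedding network
tm.training_model.summary()   # triplet wrapper over inputs [A, P, N]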
Example #4
    def __init__(self, model_path, alpha):
        """ Initialize Train Model
            - model_path is the path to the base face verification
            embedding model
            - loads the model using with
            tf.keras.utils.CustomObjectScope({'tf': tf}):
            - saves this model as the public instance attribute base_model
            - alpha is the alpha to use for the triplet loss calculation
            Creates a new model:
            inputs: [A, P, N]
            A is a numpy.ndarray containing the anchor images
            P is a numpy.ndarray containing the positive images
            N is a numpy.ndarray containing the negative images
            outputs: the triplet losses of base_model
            compiles the model with Adam optimization and no additional losses
            save this model as the public instance attribute training_model
            you can use from triplet_loss import TripletLoss
        """
        with tf.keras.utils.CustomObjectScope({'tf': tf}):
            self.base_model = K.models.load_model(model_path)
        A_input = tf.keras.Input(shape=(96, 96, 3))
        P_input = tf.keras.Input(shape=(96, 96, 3))
        N_input = tf.keras.Input(shape=(96, 96, 3))

        predict_a = self.base_model(A_input)
        predict_b = self.base_model(P_input)
        predict_c = self.base_model(N_input)

        tl = TripletLoss(alpha)
        output = tl([predict_a, predict_b, predict_c])
        inputs = [A_input, P_input, N_input]

        training_model = K.models.Model(inputs, output)
        training_model.compile(optimizer='Adam')
        self.training_model = training_model
Example #5
File: train_v11.py  Project: AlanChaw/WTI
def batch_eval(batch, model):
    model.eval()
    triplet_loss_func = TripletLoss(margin=args.margin)

    anchor, positive, negative = batch
    anchor_feature = model(anchor)
    positive_feature = model(positive)
    negative_feature = model(negative)
    triplet_loss = triplet_loss_func(anchor_feature, positive_feature,
                                     negative_feature)
    loss = triplet_loss

    positive_distance = (anchor_feature -
                         positive_feature).pow(2).sum(1).detach()
    negative_distance = (anchor_feature -
                         negative_feature).pow(2).sum(1).detach()
    acc = torch.gt(negative_distance,
                   positive_distance).type(torch.FloatTensor).mean()
    return loss, acc, positive_distance, negative_distance
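The PyTorch snippets on this page call `TripletLoss(margin=...)` directly on three embedding batches. A minimal sketch of a module with that interface (the real class may add hard mining or configurable reduction; Example #26, for instance, passes a `reduction` argument):

import torch.nn as nn
import torch.nn.functional as F

class TripletLoss(nn.Module):
    def __init__(self, margin=0.2):
        super().__init__()
        self.margin = margin

    def forward(self, anchor, positive, negative):
        pos_dist = (anchor - positive).pow(2).sum(1)  # squared L2 distances
        neg_dist = (anchor - negative).pow(2).sum(1)
        # hinge on the margin, averaged over the batch
        return F.relu(pos_dist - neg_dist + self.margin).mean()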
Example #6
    def __init__(self, model_path, alpha):
        """
        Initialize model
        Args:
            model_path: path to the base face verification
                        embedding model
            alpha: alpha to use for the triplet loss calculation
        """
        with tf.keras.utils.CustomObjectScope({'tf': tf}):
            self.base_model = K.models.load_model(model_path)
        self.alpha = alpha

        # adding inputs [A, P, N]
        A = K.Input(shape=(96, 96, 3))
        P = K.Input(shape=(96, 96, 3))
        N = K.Input(shape=(96, 96, 3))
        inputs = [A, P, N]

        X_a = self.base_model(A)
        X_p = self.base_model(P)
        X_n = self.base_model(N)
        encoded_input = [X_a, X_p, X_n]

        decoded = TripletLoss(alpha=alpha)(encoded_input)
        decoder = K.models.Model(inputs, decoded)

        self.training_model = decoder
        self.training_model.compile(optimizer='Adam')
Example #7
    def __init__(self, model_path, alpha):
        """
        constructor
        :param model_path: path to the base face verification embedding model
        :param alpha: alpha to use for the triplet loss calculation
        """
        with tf.keras.utils.CustomObjectScope({'tf': tf}):
            self.base_model = tf.keras.models.load_model(model_path)

        A = tf.keras.Input(shape=(96, 96, 3))
        P = tf.keras.Input(shape=(96, 96, 3))
        N = tf.keras.Input(shape=(96, 96, 3))

        network0 = self.base_model(A)
        network1 = self.base_model(P)
        network2 = self.base_model(N)

        tl = TripletLoss(alpha)

        # combine the output of the three branches
        combined = [network0, network1, network2]
        output = tl(combined)

        my_model = tf.keras.models.Model([A, P, N], output)

        my_model.compile(optimizer='adam')

        self.training_model = my_model
Example #8
    def __init__(self, model_path, alpha):
        """
        Class constructor

        Arguments:
         - model_path is the path to the base face verification embedding model
            * loads the model using with
                tf.keras.utils.CustomObjectScope({'tf': tf})
            * saves this model as the public instance attribute base_model
         - alpha is the alpha to use for the triplet loss calculation
        """

        with tf.keras.utils.CustomObjectScope({'tf': tf}):
            self.base_model = tf.keras.models.load_model(model_path)

        A = tf.keras.Input(shape=(96, 96, 3))
        P = tf.keras.Input(shape=(96, 96, 3))
        N = tf.keras.Input(shape=(96, 96, 3))

        netw1 = self.base_model(A)
        netw2 = self.base_model(P)
        netw3 = self.base_model(N)

        tl = TripletLoss(alpha)

        nall = [netw1, netw2, netw3]
        out = tl(nall)

        model = tf.keras.models.Model([A, P, N], out)
        model.compile(optimizer='adam')
        self.training_model = model
Example #9
    def __init__(self, model_path, alpha):
        """
        Class constructor
        Args:
            model_path: path to the base face verification embedding model
            alpha: the alpha to use for the triplet loss calculation
        """
        with tf.keras.utils.CustomObjectScope({'tf': tf}):
            self.base_model = K.models.load_model(model_path)

        A = tf.keras.Input(shape=(96, 96, 3))
        P = tf.keras.Input(shape=(96, 96, 3))
        N = tf.keras.Input(shape=(96, 96, 3))

        output_A = self.base_model(A)
        output_P = self.base_model(P)
        output_N = self.base_model(N)

        outputs = [output_A, output_P, output_N]
        tl = TripletLoss(alpha)
        output = tl(outputs)

        model = K.models.Model([A, P, N], output)
        model.compile(optimizer="adam")
        self.training_model = model
Example #10
def train_model(epoch, model, optimizer, lr_scheduler, loader, center_loss=None):
    global GLOBAL_STEP

    loss_meter = AverageMeter()
    acc_meter = AverageMeter()
    positive_distance_meter = AverageMeter()
    negative_distance_meter = AverageMeter()

    model.train()
    model.to(device)

    print('=' * 20 + "Model Training" + '=' * 20)

    triplet_loss = TripletLoss(args.margin)

    for i, batch in tqdm(enumerate(loader)):
        start = time.time()

        optimizer.zero_grad()
        model.zero_grad()
        anchor, positive, negative = batch
        anchor_feature = model(anchor)
        positive_feature = model(positive)
        negative_feature = model(negative)
        loss = triplet_loss(anchor_feature, positive_feature, negative_feature)
        loss.backward()
        grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip_bound)
        optimizer.step()

        positive_distance = (anchor_feature - positive_feature).pow(2).sum(1).detach()
        negative_distance = (anchor_feature - negative_feature).pow(2).sum(1).detach()
        acc = torch.gt(negative_distance, positive_distance).type(torch.FloatTensor).mean()
        loss_meter.update(loss.item())
        acc_meter.update(acc.item())
        positive_distance_meter.update(positive_distance.mean().item())
        negative_distance_meter.update(negative_distance.mean().item())

        end = time.time()
        used_time = end - start
        lr = optimizer.param_groups[0]['lr']
        display = 'epoch=' + str(epoch) + \
                  '\tglobal_step=%d' % (GLOBAL_STEP) + \
                  '\tloss=%.4f' % (loss_meter.val) + \
                  '\tloss_avg=%.4f' % (loss_meter.avg) + \
                  '\tpos_avg=%.4f' % (positive_distance_meter.avg) + \
                  '\tneg_avg=%.4f' % (negative_distance_meter.avg) + \
                  '\tacc=%.4f' % (acc_meter.avg) + \
                  '\tlr=%.6f' % (lr) + \
                  '\t|g|=%.4f' % (grad_norm) + \
                  '\ttime=%.2fit/s' % (1. / used_time)
        if (GLOBAL_STEP) % args.log_every == 0:
            tqdm.write(display)
            save_mode(epoch=epoch,
                      model=model,
                      optimizer=optimizer,
                      lr_scheduler=lr_scheduler,
                      center_loss=center_loss)
        GLOBAL_STEP += 1
    return
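The meters in these loops track the latest value and a running average. A minimal `AverageMeter` with the `val`/`avg`/`update` interface the snippets rely on (assumed; the project's own implementation may differ):

class AverageMeter:
    """Tracks the most recent value and the running average."""
    def __init__(self):
        self.val = 0.0
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count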
Example #11
def eval_model(epoch, model, loader):
    global CURRENT_LOSS
    model.eval()
    model.to(device)

    positive_distance_meter = AverageMeter()
    negative_distance_meter = AverageMeter()
    loss_meter = AverageMeter()
    acc_meter = AverageMeter()

    triplet_loss = TripletLoss(margin=args.margin)

    print('=' * 20 + "Model Eval" + '=' * 20)
    for i, batch in tqdm(enumerate(loader)):
        start = time.time()
        with torch.no_grad():
            anchor, positive, negative = batch
            anchor_feature = model(anchor)
            positive_feature = model(positive)
            negative_feature = model(negative)
            loss = triplet_loss(anchor_feature, positive_feature,
                                negative_feature)

            positive_distance = (anchor_feature -
                                 positive_feature).pow(2).sum(1).detach()
            negative_distance = (anchor_feature -
                                 negative_feature).pow(2).sum(1).detach()
            acc = torch.gt(negative_distance,
                           positive_distance).type(torch.FloatTensor).mean()
            loss_meter.update(loss.item())
            acc_meter.update(acc.item())
            positive_distance_meter.update(positive_distance.mean().item())
            negative_distance_meter.update(negative_distance.mean().item())

            end = time.time()
            used_time = end - start
            display = 'epoch=' + str(epoch) + \
                      '\tglobal_step=%d' % (GLOBAL_STEP) + \
                      '\tloss=%.4f' % (loss_meter.val) + \
                      '\tloss_avg=%.4f' % (loss_meter.avg) + \
                      '\tpos_avg=%.4f' % (positive_distance_meter.avg) + \
                      '\tneg_avg=%.4f' % (negative_distance_meter.avg) + \
                      '\tacc=%.4f' % (acc_meter.avg) + \
                      '\ttime=%.2fit/s' % (1. / used_time)

        if i % (args.log_every // 2) == 0:
            tqdm.write(display)
    print("Final Acc: %.6f\n" % (acc_meter.avg))
    print("Final Loss Acc: %.6f\n" % (loss_meter.avg))
    print("Final Positive Distance: %.6f\n" % (positive_distance_meter.avg))
    print("Final Negative Distance: %.6f\n" % (negative_distance_meter.avg))
    CURRENT_LOSS = loss_meter.avg
    return
Example #12
File: train_v14.py  Project: AlanChaw/WTI
def batch_eval(batch, model):
    model.eval()
    triplet_loss_func = TripletLoss(margin=args.margin)

    anchor, positive, negative = batch
    anchor_feature = model(anchor)
    positive_feature = model(positive)
    negative_feature = model(negative)
    triplet_loss = triplet_loss_func(anchor_feature, positive_feature, negative_feature)
    loss = triplet_loss

    positive_distance = (anchor_feature - positive_feature).pow(2).sum(1).detach()
    negative_distance = (anchor_feature - negative_feature).pow(2).sum(1).detach()
    acc = torch.gt(negative_distance, positive_distance).type(torch.FloatTensor).mean()
    return loss, acc, positive_distance, negative_distance
Example #13
    def __init__(self, model_path, alpha):
        """
        * model_path is the path to the base face verification
          embedding model
        * loads the model using with
          tf.keras.utils.CustomObjectScope({'tf': tf}):
        * saves this model as the public instance attribute base_model
        * alpha is the alpha to use for the triplet loss calculation
        * Creates a new model:
        * * inputs: [A, P, N]
            - A is a numpy.ndarray of shape (m, n, n, 3) containing the
              aligned anchor images
            - P is a numpy.ndarray of shape (m, n, n, 3) containing the
              aligned positive images
            - N is a numpy.ndarray of shape (m, n, n, 3) containing the
              aligned negative images
            - m is the number of images
            - n is the size of the aligned images
        * * outputs: the triplet losses of base_model
        * * compiles the model with:
            - Adam optimization
            - no additional losses
        * * save this model as the public instance attribute training_model
        * you can use from triplet_loss import TripletLoss
        """
        with tf.keras.utils.CustomObjectScope({'tf': tf}):
            self.base_model = tf.keras.models.load_model(model_path)

        # complete model: these Inputs match the shape expected by base_model
        A = tf.keras.Input(shape=(96, 96, 3))
        P = tf.keras.Input(shape=(96, 96, 3))
        N = tf.keras.Input(shape=(96, 96, 3))

        # each of A, P, N is passed through the shared base_model
        predict_a = self.base_model(A)
        predict_b = self.base_model(P)
        predict_c = self.base_model(N)

        # the three embeddings feed the triplet-loss layer
        tl = TripletLoss(alpha)
        output = tl([predict_a, predict_b, predict_c])

        # the combined model maps the three inputs to the triplet loss
        model_fin = tf.keras.models.Model([A, P, N], outputs=output)
        model_fin.compile(optimizer='adam')
        self.training_model = model_fin
Example #14
    def __init__(self, model_path, alpha):
        """  Initialize Train Model
            - model_path is the path to the base face verification
                embedding model
            - loads the model using with
                tf.keras.utils.CustomObjectScope({'tf': tf}):
            - saves this model as the public instance attribute base_model
            - alpha is the alpha to use for the triplet loss calculation

            Creates a new model:
            inputs: [A, P, N]
                A is a numpy.ndarray containing the anchor images
                P is a numpy.ndarray containing the positive images
                N is a numpy.ndarray containing the negative images
            outputs: the triplet losses of base_model
            compiles the model with Adam optimization and no additional losses
            save this model as the public instance attribute training_model
            you can use from triplet_loss import TripletLoss
        """

        with tf.keras.utils.CustomObjectScope({'tf': tf}):
            self.base_model = K.models.load_model(model_path)

        A_input = tf.keras.Input(shape=(96, 96, 3))
        P_input = tf.keras.Input(shape=(96, 96, 3))
        N_input = tf.keras.Input(shape=(96, 96, 3))
        inputs = [A_input, P_input, N_input]

        # each input batch goes through the shared embedding network
        A = self.base_model(A_input)
        P = self.base_model(P_input)
        N = self.base_model(N_input)

        tl = TripletLoss(alpha)
        output = tl([A, P, N])

        training_model = K.models.Model(inputs, output)
        training_model.compile(optimizer='Adam')
        self.training_model = training_model
Example #15
    def __init__(self, model_path, alpha):
        """
        Initialize model
        """
        with tf.keras.utils.CustomObjectScope({'tf': tf}):
            self.base_model = K.models.load_model(model_path)
        self.alpha = alpha

        A = K.Input(shape=(96, 96, 3))
        P = K.Input(shape=(96, 96, 3))
        N = K.Input(shape=(96, 96, 3))
        inputs = [A, P, N]

        X_a = self.base_model(A)
        X_p = self.base_model(P)
        X_n = self.base_model(N)
        encoded_input = [X_a, X_p, X_n]

        decoded = TripletLoss(alpha=alpha)(encoded_input)
        decoder = K.models.Model(inputs, decoded)

        self.training_model = decoder
        self.training_model.compile(optimizer='Adam')
Example #16
	num_feats = 3
	closs_weight = 1
	feat_dim = 2300

	learningRate = 1e-2


	hidden_sizes = [32, 64, 128, 256]
	num_classes = 2300

	device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

	network = Network(num_feats, hidden_sizes, num_classes)

	network.linear_label = nn.Linear(256, 4600)
	network.linear_closs = nn.Linear(256, 4600)

	val_data = val_datasets('validation_trials_verification.txt', 'validation_verification', data_transform_val)

	val_loader_args = dict(shuffle=False, batch_size= 256, pin_memory=True, num_workers = 8) 
	val_loader = Data.DataLoader(val_data, **val_loader_args)
	numEpochs = 100
	model = nn.Sequential(*list(network.children())[:-3])
	criterion = TripletLoss(margin = 0.7)
	optimizer = optim.Adam(model.parameters(), lr = 0.0001)
	model.train()
	model.to(device)
	train_verification(model, val_loader, 40)
Example #17
def main():
    args = parser.parse_args()

    if args.output:
        output_base = args.output
    else:
        output_base = './output'
    exp_name = '-'.join([
        datetime.now().strftime("%Y%m%d-%H%M%S"), args.model, args.gp,
        'f' + str(args.fold)
    ])
    output_dir = get_outdir(output_base, 'train', exp_name)

    train_input_root = os.path.join(args.data)
    batch_size = args.p * args.k
    num_epochs = args.epochs
    wav_size = (16000, )
    num_classes = 128  # triplet embedding size

    torch.manual_seed(args.seed)

    model = model_factory.create_model(args.model,
                                       in_chs=1,
                                       pretrained=args.pretrained,
                                       num_classes=num_classes,
                                       drop_rate=args.drop,
                                       global_pool=args.gp,
                                       embedding_net=True,
                                       embedding_norm=2.,
                                       embedding_act_fn=torch.sigmoid,
                                       checkpoint_path=args.initial_checkpoint)

    dataset_train = dataset.CommandsDataset(
        root=train_input_root,
        mode='train',
        fold=args.fold,
        wav_size=wav_size,
        format='spectrogram',
        train_unknown=False,
    )

    loader_train = data.DataLoader(dataset_train,
                                   batch_size=batch_size,
                                   pin_memory=True,
                                   sampler=dataset.PKSampler(dataset_train,
                                                             p=args.p,
                                                             k=args.k),
                                   num_workers=args.workers)

    dataset_eval = dataset.CommandsDataset(
        root=train_input_root,
        mode='validate',
        fold=args.fold,
        wav_size=wav_size,
        format='spectrogram',
        train_unknown=False,
    )

    loader_eval = data.DataLoader(dataset_eval,
                                  batch_size=batch_size,
                                  pin_memory=True,
                                  sampler=dataset.PKSampler(dataset_eval,
                                                            p=args.p,
                                                            k=args.k),
                                  num_workers=args.workers)

    train_loss_fn = validate_loss_fn = TripletLoss(margin=0.5, sample=True)
    train_loss_fn = train_loss_fn.cuda()
    validate_loss_fn = validate_loss_fn.cuda()

    opt_params = list(model.parameters())
    if args.opt.lower() == 'sgd':
        optimizer = optim.SGD(opt_params,
                              lr=args.lr,
                              momentum=args.momentum,
                              weight_decay=args.weight_decay,
                              nesterov=True)
    elif args.opt.lower() == 'adam':
        optimizer = optim.Adam(opt_params,
                               lr=args.lr,
                               weight_decay=args.weight_decay,
                               eps=args.opt_eps)
    elif args.opt.lower() == 'nadam':
        optimizer = nadam.Nadam(opt_params,
                                lr=args.lr,
                                weight_decay=args.weight_decay,
                                eps=args.opt_eps)
    elif args.opt.lower() == 'adadelta':
        optimizer = optim.Adadelta(opt_params,
                                   lr=args.lr,
                                   weight_decay=args.weight_decay,
                                   eps=args.opt_eps)
    elif args.opt.lower() == 'rmsprop':
        optimizer = optim.RMSprop(opt_params,
                                  lr=args.lr,
                                  alpha=0.9,
                                  eps=args.opt_eps,
                                  momentum=args.momentum,
                                  weight_decay=args.weight_decay)
    else:
        assert False, "Invalid optimizer"
    del opt_params

    if not args.decay_epochs:
        print('No decay epoch set, using plateau scheduler.')
        lr_scheduler = ReduceLROnPlateau(optimizer, patience=10)
    else:
        lr_scheduler = None

    # optionally resume from a checkpoint
    start_epoch = 0 if args.start_epoch is None else args.start_epoch
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
                if 'args' in checkpoint:
                    print(checkpoint['args'])
                new_state_dict = OrderedDict()
                for k, v in checkpoint['state_dict'].items():
                    if k.startswith('module'):
                        name = k[7:]  # remove `module.`
                    else:
                        name = k
                    new_state_dict[name] = v
                model.load_state_dict(new_state_dict)
                if 'optimizer' in checkpoint:
                    optimizer.load_state_dict(checkpoint['optimizer'])
                if 'loss' in checkpoint:
                    train_loss_fn.load_state_dict(checkpoint['loss'])
                print("=> loaded checkpoint '{}' (epoch {})".format(
                    args.resume, checkpoint['epoch']))
                start_epoch = checkpoint[
                    'epoch'] if args.start_epoch is None else args.start_epoch
            else:
                model.load_state_dict(checkpoint)
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
            exit(1)

    saver = CheckpointSaver(checkpoint_dir=output_dir)

    if args.num_gpu > 1:
        model = torch.nn.DataParallel(model,
                                      device_ids=list(range(
                                          args.num_gpu))).cuda()
    else:
        model.cuda()

    best_loss = None
    try:
        for epoch in range(start_epoch, num_epochs):
            if args.decay_epochs:
                adjust_learning_rate(optimizer,
                                     epoch,
                                     initial_lr=args.lr,
                                     decay_rate=args.decay_rate,
                                     decay_epochs=args.decay_epochs)

            train_metrics = train_epoch(epoch,
                                        model,
                                        loader_train,
                                        optimizer,
                                        train_loss_fn,
                                        args,
                                        saver=saver,
                                        output_dir=output_dir)

            # save a recovery in case validation blows up
            saver.save_recovery(
                {
                    'epoch': epoch + 1,
                    'arch': args.model,
                    'state_dict': model.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'loss': train_loss_fn.state_dict(),
                    'args': args,
                    'gp': args.gp,
                },
                epoch=epoch + 1,
                batch_idx=0)

            step = epoch * len(loader_train)
            eval_metrics = validate(step,
                                    model,
                                    loader_eval,
                                    validate_loss_fn,
                                    args,
                                    output_dir=output_dir)

            if lr_scheduler is not None:
                lr_scheduler.step(eval_metrics['eval_loss'])

            rowd = OrderedDict(epoch=epoch)
            rowd.update(train_metrics)
            rowd.update(eval_metrics)
            with open(os.path.join(output_dir, 'summary.csv'), mode='a') as cf:
                dw = csv.DictWriter(cf, fieldnames=rowd.keys())
                if best_loss is None:  # first iteration (epoch == 1 can't be used)
                    dw.writeheader()
                dw.writerow(rowd)

            # save proper checkpoint with eval metric
            best_loss = saver.save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'arch': args.model,
                    'state_dict': model.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'args': args,
                    'gp': args.gp,
                },
                epoch=epoch + 1,
                metric=eval_metrics['eval_loss'])

    except KeyboardInterrupt:
        pass
    if best_loss is not None:
        print('*** Best loss: {0} (epoch {1})'.format(best_loss[1],
                                                      best_loss[0]))
Example #18
#!/usr/bin/env python3

from triplet_loss import TripletLoss

print(TripletLoss.__bases__)
tl = TripletLoss(0.2)
print(tl.alpha)
print(sorted(tl.__dict__.keys()))
Example #19
        # Forward pass: run the three images through the model to produce
        # embeddings (training consumes image triplets and yields a loss;
        # at validation a single image goes in and only its embedding comes out)
        anc_embedding = model(anc_img.cuda())
        pos_embedding = model(pos_img.cuda())
        neg_embedding = model(neg_img.cuda())

        # note: torch.norm with no dim argument is the norm over the whole
        # batch; per-sample L2 normalization would pass dim=1, keepdim=True
        anc_embedding = torch.div(anc_embedding, torch.norm(anc_embedding))
        pos_embedding = torch.div(pos_embedding, torch.norm(pos_embedding))
        neg_embedding = torch.div(neg_embedding, torch.norm(neg_embedding))

        # Loss computation: triplet loss over this batch's hard samples,
        # produced by the triplet_loss call below
        triplet_loss = TripletLoss(margin=0.1)(anchor=anc_embedding,
                                               positive=pos_embedding,
                                               negative=neg_embedding)

        loss = triplet_loss

        # backward pass
        optimizer_model.zero_grad()
        # backward() is called on the loss tensor itself
        loss.backward()
        optimizer_model.step()

        # accumulate this epoch's total triplet loss and the number of samples used
        triplet_loss_sum += triplet_loss.item()
        sample_num += anc_embedding.shape[0]

    # compute this epoch's average loss
Example #20
def train(args,
          start_batch_idx,
          text_encoder,
          image_encoder,
          optimizer,
          train_loader,
          device='cuda'):
    if args.loss_type == 'hinge':
        criterion = compute_loss
    elif args.loss_type == 'hardmining+hinge':
        triplet_loss = TripletLoss(margin=args.margin)
    elif args.loss_type == 'dynamic_soft_margin':
        criterion = DynamicSoftMarginLoss(is_binary=False,
                                          nbins=args.batch_size // 2)
        criterion = criterion.to(device)

    #####################
    # train
    #####################
    wandb.init(project="cookgan_retrieval_model")
    wandb.config.update(args)

    pbar = range(args.batches)
    pbar = tqdm(pbar,
                initial=start_batch_idx,
                dynamic_ncols=True,
                smoothing=0.3)

    text_encoder.train()
    image_encoder.train()
    if device == 'cuda':
        text_module = text_encoder.module
        image_module = image_encoder.module
    else:
        text_module = text_encoder
        image_module = image_encoder
    train_loader = sample_data(train_loader)

    for batch_idx in pbar:
        txt, img = next(train_loader)
        for i in range(len(txt)):
            txt[i] = txt[i].to(device)
        img = img.to(device)

        txt_feat, _ = text_encoder(*txt)
        img_feat = image_encoder(img)
        bs = img.shape[0]
        if args.loss_type == 'hinge':
            loss = criterion(img_feat, txt_feat, device)
        elif args.loss_type == 'hardmining+hinge':
            label = list(range(0, bs))
            label.extend(label)
            label = np.array(label)
            label = torch.tensor(label).long().to(device)
            loss = global_loss(triplet_loss,
                               torch.cat((img_feat, txt_feat)),
                               label,
                               normalize_feature=True)[0]
        elif args.loss_type == 'dynamic_soft_margin':
            out = torch.cat((img_feat, txt_feat))
            loss = criterion(out)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        wandb.log({'training loss': loss, 'batch_idx': batch_idx})

        if batch_idx % 10_000 == 0:
            ckpt_path = f'{wandb.run.dir}/{batch_idx:>08d}.ckpt'
            save_model(args, batch_idx, text_module, image_module, optimizer,
                       ckpt_path)
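The `sample_data` helper used above is assumed to wrap the DataLoader in an infinite iterator so the loop can be driven by a batch counter instead of epochs; a common sketch of that pattern:

def sample_data(loader):
    # hypothetical helper: cycle through the DataLoader forever
    while True:
        for batch in loader:
            yield batch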
Example #21
adj_label = adj
adj_label = torch.FloatTensor(adj_label.toarray())

adj_train = preprocess_graph(adj)

# Model and optimizer
model = GCNModelAE_UN(nfeat=features.shape[1],
                      nhid=args.hidden,
                      nclass=args.nclass,
                      dropout=args.dropout)

optimizer = optim.Adam(model.parameters(),
                       lr=args.lr,
                       weight_decay=args.weight_decay)

triplet_loss = TripletLoss(G, False)
indices = []
losses = []


def train(epoch):
    with torch.autograd.set_detect_anomaly(True):
        t = time.time()
        model.train()
        optimizer.zero_grad()
        output = model(features, adj_train)
        # triplet_loss.embeddings = output
        loss = triplet_loss._batch_hard_triplet_loss(output)
        # loss = triplet_loss.get_loss_margin()
        losses.append(loss.item())  # store a float rather than the graph-holding tensor
        indices.append(epoch)
Example #22
File: train_v11.py  Project: AlanChaw/WTI
def train_model(epoch, model, optimizer, lr_scheduler, loader, test_loader):
    global GLOBAL_STEP

    test_loader_it = iter(test_loader)

    loss_meter = AverageMeter()
    val_loss_meter = AverageMeter()
    acc_meter = AverageMeter()
    val_acc_meter = AverageMeter()
    positive_distance_meter = AverageMeter()
    negative_distance_meter = AverageMeter()

    model.train()
    model.to(device)

    print('=' * 20 + "Model Training" + '=' * 20)

    triplet_loss_func = TripletLoss(args.margin)

    for i, batch in tqdm(enumerate(loader)):
        start = time.time()
        model.train()
        optimizer.zero_grad()
        model.zero_grad()
        anchor, positive, negative = batch
        anchor_feature = model(anchor)
        positive_feature = model(positive)
        negative_feature = model(negative)
        triplet_loss = triplet_loss_func(anchor_feature, positive_feature,
                                         negative_feature)
        loss = triplet_loss
        loss.backward()
        grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(),
                                                   args.grad_clip_bound)
        optimizer.step()

        positive_distance = (anchor_feature -
                             positive_feature).pow(2).sum(1).detach()
        negative_distance = (anchor_feature -
                             negative_feature).pow(2).sum(1).detach()
        acc = torch.gt(negative_distance,
                       positive_distance).type(torch.FloatTensor).mean()
        loss_meter.update(loss.item())
        acc_meter.update(acc.item())
        positive_distance_meter.update(positive_distance.mean().item())
        negative_distance_meter.update(negative_distance.mean().item())

        try:
            batch = next(test_loader_it)
        except StopIteration:
            test_loader_it = iter(test_loader)
            batch = next(test_loader_it)
        eval_loss, eval_acc, _, _ = batch_eval(batch, model)

        val_loss_meter.update(eval_loss.item())
        val_acc_meter.update(eval_acc.item())
        end = time.time()
        used_time = end - start

        if (GLOBAL_STEP) % args.log_every == 0:

            lr = optimizer.param_groups[0]['lr']
            display = 'epoch=' + str(epoch) + \
                      '\tglobal_step=%d' % (GLOBAL_STEP) + \
                      '\tloss=%.4f' % (loss_meter.val) + \
                      '\tloss_avg=%.4f' % (loss_meter.avg) + \
                      '\tval_loss=%.4f' % (val_loss_meter.avg) + \
                      '\tpos_avg=%.4f' % (positive_distance_meter.avg) + \
                      '\tneg_avg=%.4f' % (negative_distance_meter.avg) + \
                      '\tacc=%.4f' % (acc_meter.avg) + \
                      '\tval_acc=%.4f' % (val_acc_meter.avg) + \
                      '\tlr=%.6f' % (lr) + \
                      '\t|g|=%.4f' % (grad_norm) + \
                      '\ttime=%.2fit/s' % (1. / used_time)
            tqdm.write(display)
            save_mode(epoch=epoch,
                      model=model,
                      optimizer=optimizer,
                      lr_scheduler=lr_scheduler)
        GLOBAL_STEP += 1
    return
Example #23
#!/usr/bin/env python3

import numpy as np
import tensorflow as tf
from triplet_loss import TripletLoss

np.random.seed(0)
tl = TripletLoss(0.2)
A = np.random.uniform(0, 1, (2, 128))
P = np.random.uniform(0, 1, (2, 128))
N = np.random.uniform(0, 1, (2, 128))

with tf.Session() as sess:
    loss = tl.triplet_loss([A, P, N])
    print(type(loss))
    print(loss.eval())
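The script above is TensorFlow 1.x code (`tf.Session`, `Tensor.eval()`). Under TensorFlow 2.x eager execution the same check could look like this sketch, assuming the same `TripletLoss` interface:

import numpy as np
from triplet_loss import TripletLoss

np.random.seed(0)
tl = TripletLoss(0.2)
A = np.random.uniform(0, 1, (2, 128))
P = np.random.uniform(0, 1, (2, 128))
N = np.random.uniform(0, 1, (2, 128))
loss = tl.triplet_loss([A, P, N])
print(loss.numpy())  # eager tensors expose .numpy() directly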
Example #24
        embedding = embedding * alpha

        return embedding

if __name__ == '__main__':
    start_epoch = 0
    model = Resnet18Triplet(pretrained=False, embedding_dimension=256)
    if torch.cuda.is_available():
        model.cuda()
        print('Using single-gpu training.')

    # loss function
    loss_fun = TripletLoss(margin=0.5).cuda()

    def adjust_learning_rate(optimizer, epoch):
        if epoch < 30:
            lr = 0.125
        elif epoch < 60:
            lr = 0.0625
        elif epoch < 90:
            lr = 0.0155
        elif epoch < 120:
            lr = 0.003
        elif epoch < 160:
            lr = 0.0001
        else:
            lr = 0.00006
        for param_group in optimizer.param_groups:
Example #25
    def __init__(self,
                 model='test',
                 model_name='resnet50_ibn_a',
                 model_path='',
                 last_stride=1):
        super(Baseline, self).__init__()

        if (model_name == 'resnet50_ibn_a'):
            self.in_planes = 2048
            self.base = resnet50_ibn_a(last_stride=last_stride)
            print('Model name = {}'.format(model_name))
        if (model_name == 'mobilfacenet'):
            self.in_planes = 512
            self.base = MobileFaceNet(512)
            print('Model name = {}'.format(model_name))
        if (model_name == 'model_ir_se50'):
            self.in_planes = 512
            self.base = Backbone(50, 'ir_se')
            print('Model name = {}'.format(model_name))
        if (model_name == 'se_resnet50'):
            self.in_planes = 2048
            self.base = SENet(block=SEResNetBottleneck,
                              layers=[3, 4, 6, 3],
                              groups=1,
                              reduction=16,
                              dropout_p=None,
                              inplanes=64,
                              input_3x3=False,
                              downsample_kernel_size=1,
                              downsample_padding=0,
                              last_stride=last_stride)
            print('Model name = {}'.format(model_name))

        if (model_name == 'AlexNet'):
            self.base = AlexNet()
            # self.base.load_param('alexnet-owt-4df8aa71.pth')
            self.base.apply(weight_init)

        if (model_name == 'MiniXception'):
            self.base = MiniXception()
            self.base.apply(weight_init)

        if (model_name == 'ConvNet'):
            self.base = ConvNet()
            self.base.apply(weight_init)

        if (model_name == 'PretrConvNet'):
            self.base = PretrConvNet()
            self.base.apply(weight_init)

        if (model_name == 'MixNet'):
            self.base = MixNet()

        # if(model == 'train'):
        # self.base.load_param(model_path)

        self.fc_lms = nn.Linear(136, 256, bias=True)
        self.bn_lms = nn.BatchNorm1d(256)

        self.fc_head = nn.Linear(512, 250, bias=False)
        self.bn_head = nn.BatchNorm1d(250)
        self.bn_head.bias.requires_grad_(False)  # no shift
        self.classifier = nn.Linear(250, 50, bias=False)

        self.relu = nn.ReLU(inplace=True)

        self.fc_lms.apply(weights_init_kaiming)
        self.bn_lms.apply(weights_init_kaiming)

        self.fc_head.apply(weights_init_classifier)
        self.bn_head.apply(weights_init_kaiming)
        self.classifier.apply(weights_init_classifier)

        self.dropout = nn.Dropout(p=0.6)

        weight = [
            0.3, 0.3, 0.3, 0.3, 0.37, 0.43, 0.37, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3,
            0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.38, 0.3,
            0.3, 0.3, 0.46, 0.3, 0.3, 0.52, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3,
            0.3, 0.47, 0.3, 0.3, 0.57, 0.3, 0.48, 0.3, 0.68, 0.3, 0.3
        ]

        weight = np.array(weight) * 10
        self.weight = torch.tensor(weight, dtype=torch.float32).cuda()

        self.criterion = nn.CrossEntropyLoss()
        self.xent = CrossEntropyLabelSmooth(num_classes=50)
        self.triplet = TripletLoss(0.3)
Example #26
        neg_img = batch_sample['neg_img'].to(device)

        # Forward pass: run the three images through the model to produce
        # their embeddings
        anc_embedding = model(anc_img)
        pos_embedding = model(pos_img)
        neg_embedding = model(neg_img)

        anc_embedding = torch.div(anc_embedding, torch.norm(anc_embedding))
        pos_embedding = torch.div(pos_embedding, torch.norm(pos_embedding))
        neg_embedding = torch.div(neg_embedding, torch.norm(neg_embedding))

        # Loss computation: triplet loss over this batch's hard samples

        triplet_loss = TripletLoss(0.1)
        loss = triplet_loss(anc_embedding,
                            pos_embedding,
                            neg_embedding,
                            reduction='mean')

        # backward pass
        optimizer_model.zero_grad()
        loss.backward()
        optimizer_model.step()

        # update the optimizer learning rate
        adjust_learning_rate(optimizer_model, epoch)

        # accumulate this epoch's total triplet loss and the number of samples used
        triplet_loss_sum += loss.item()
Example #27
def main():
    # init model, ResNet18() can be also used here for training
    # model = WideResNet().to(device)
    if args.network == 'smallCNN':
        model = SmallCNN().to(device)
    elif args.network == 'wideResNet':
        model = WideResNet().to(device)
    elif args.network == 'resnet':
        model = ResNet().to(device)
    else:
        model = VGG(args.network, num_classes=10).to(device)

    sys.stdout = Logger(os.path.join(args.log_dir, args.log_file))
    print(model)
    criterion_tla = TripletLoss(10, args.feat_size)
    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    if args.fine_tune:
        base_dir = args.base_dir
        state_dict = torch.load("{}/{}_ep{}.pt".format(base_dir, args.base_model, args.checkpoint))
        opt = torch.load("{}/opt-{}_ep{}.tar".format(base_dir, args.base_model, args.checkpoint))
        model.load_state_dict(state_dict)
        optimizer.load_state_dict(opt)

    natural_acc = []
    robust_acc = []

    for epoch in range(1, args.epochs + 1):
        # adjust learning rate for SGD
        adjust_learning_rate(optimizer, epoch)

        start_time = time.time()

        # adversarial training
        train(model, device, train_loader, optimizer,
              criterion_tla, epoch)

        # evaluation on natural examples
        print('================================================================')
        print("Current time: {}".format(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
        # eval_train(model, device, train_loader)
        # eval_test(model, device, test_loader)
        natural_err_total, robust_err_total = eval_adv_test_whitebox(model, device, test_loader)
        with open(os.path.join(stats_dir, '{}.txt'.format(args.save_model)), "a") as f:
            f.write("{} {} {}\n".format(epoch, natural_err_total, robust_err_total))

        print('using time:', datetime.timedelta(seconds=round(time.time() - start_time)))

        natural_acc.append(natural_err_total)
        robust_acc.append(robust_err_total)

        file_name = os.path.join(stats_dir, '{}_stat{}.npy'.format(args.save_model, epoch))
        np.save(file_name, np.stack((np.array(natural_acc), np.array(robust_acc))))

        # save checkpoint
        if epoch % args.save_freq == 0:
            torch.save(model.state_dict(),
                       os.path.join(model_dir, '{}_ep{}.pt'.format(args.save_model, epoch)))
            torch.save(optimizer.state_dict(),
                       os.path.join(model_dir, 'opt-{}_ep{}.tar'.format(args.save_model, epoch)))
            print("Ep{}: Model saved as {}.".format(epoch, args.save_model))
        print('================================================================')
Example #28
    np.random.seed(args.random_seed)
    torch.manual_seed(args.random_seed)
    torch.cuda.manual_seed(args.random_seed)
    # load dataset and prepare data loaders
    print('===> prepare dataloader ...')
    train_loader, gallery_loader, query_loader = loader(args)

    # load model
    print('===> prepare model ...')
    model = Model()
    if mgpus:
        model = torch.nn.DataParallel(model, device_ids=[0, 1]).cuda()
    else:
        model.cuda()  # load model to gpu

    # define losses
    criterion = nn.CrossEntropyLoss()
    t_loss = TripletLoss()

    # setup optimizer
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay)

    # setup tensorboard
    writer = SummaryWriter(os.path.join(args.save_dir, 'train_info'))

    # train model
    iters = 0
    best_acc = 0
    best_acc_cos = 0
    num_batches = len(train_loader)
    print_num = num_batches // 2
    lr = args.lr

    print('===> start training ...')
Example #29
        return x


use_gpu = torch.cuda.is_available()

model = Model()
cost = TripletLoss()

model = model.cuda()
cost = cost.cuda()

optimizer = torch.optim.Adam(model.parameters())

epoch_n = 5
for epoch in range(epoch_n):
    running_loss = 0.0
    running_correct = 0.0
    time = 0
    print("Epoch{}/{}".format(epoch, epoch_n))
    print("-" * 10)
    for data in data_loader_train:
        x_train, y_train = data
Example #30
model = ft_net(len(class_names))
print(model)

if use_gpu:
    model = model.cuda()

triplet = TripletLoss(margin=0.3)
criterion = CrossEntropyLabelSmooth(num_classes=len(class_names))

# Decay LR by a factor of 0.1 every 40 epochs

######################################################################
# Train and evaluate
# ^^^^^^^^^^^^^^^^^^
#
# It should take around 1-2 hours on GPU.
#
dir_name = os.path.join('./model', name)
if not os.path.isdir(dir_name):
    os.mkdir(dir_name)
copyfile('./train_11.py', dir_name + '/train_11.py')
copyfile('./model.py', dir_name + '/model.py')