Example #1
def fit_source(src_model, src_opt, src_trainloader, history, exp_dict):
    # Train Source
    for e in range(history["src_train"][-1]["epoch"], exp_dict["src_epochs"]):
        loss_sum = 0.
        for step, (images, labels) in enumerate(src_trainloader):
            # make images and labels variable
            images = images.cuda()
            labels = labels.squeeze_().cuda()

            # zero gradients for opt
            src_opt.zero_grad()

            # compute triplet loss for the source model
            loss = losses.triplet_loss(src_model, {"X": images, "y": labels})

            loss_sum += loss.item()

            # optimize source classifier
            loss.backward()
            src_opt.step()

        loss = loss_sum / (step + 1)  # step is zero-based, so average over all batches
        print("Source ({}) - Epoch [{}/{}] - loss={:.2f}".format(
            type(src_trainloader).__name__, e, exp_dict["src_epochs"], loss))

        history["src_train"] += [{"loss": loss, "epoch": e}]

        if e % 50 == 0:
            ms.save_model_src(exp_dict, history, src_model, src_opt)

    return history
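The losses.triplet_loss(model, batch) helper called above is not included on this page. A minimal sketch of a batch-hard triplet loss with the same calling convention, assuming the model maps images to embeddings (the helper name, margin, and mining strategy are assumptions, not the original implementation):

import torch
import torch.nn.functional as F

def triplet_loss(model, batch, margin=1.0):
    # Hypothetical stand-in for losses.triplet_loss(model, {"X": ..., "y": ...}).
    embeddings = model(batch["X"])                      # [batch, emb_size]
    labels = batch["y"]
    dist = torch.cdist(embeddings, embeddings, p=2)     # pairwise L2 distances
    same = labels.unsqueeze(0) == labels.unsqueeze(1)   # same-label mask
    # hardest positive: farthest sample that shares the anchor's label
    hardest_pos = (dist * same.float()).max(dim=1).values
    # hardest negative: closest sample with a different label
    hardest_neg = dist.masked_fill(same, float('inf')).min(dim=1).values
    return F.relu(hardest_pos - hardest_neg + margin).mean()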
Example #2
def calculate_loss(triplet_model, generator, iter_time, batch_size, N_diff,
                   margin):
    """Return the mean over iter_time generator steps of the max loss within each window of N_diff iterations."""
    max_loss = -np.inf
    ii_Ndiff = 0
    list_loss = []
    ii_counter = 0
    for input_batch in generator:
        outputs_batch = triplet_model.predict_on_batch(input_batch)
        loss_batch = K.eval(K.mean(triplet_loss(outputs_batch, margin=margin)))
        # print('predict on iter', ii_counter, loss_batch)

        if loss_batch > max_loss:
            max_loss = loss_batch

        ii_Ndiff += 1
        if ii_Ndiff >= N_diff:  # every Ndiff iterations append and reset max_loss
            # print(max_loss)
            list_loss.append(max_loss)
            max_loss = -np.inf
            ii_Ndiff = 0

        ii_counter += 1
        if ii_counter >= iter_time:  # after iterating all samples, return mean loss
            return np.mean(list_loss)
Example #3
def fit_source_supervised(tgt_model, tgt_opt, tgt_scheduler,
                          tgt_trainloader_supervised, exp_dict):
    # Train Target supervised
    flag = False
    if tgt_scheduler is None:
        tgt_scheduler = torch.optim.lr_scheduler.MultiStepLR(
            tgt_opt, milestones=[100, 500, 800], gamma=0.1)
        flag = True
    for e in range(exp_dict["tgt_epochs_supervised"]):
        loss_sum = 0.
        for step, (images, labels) in enumerate(tgt_trainloader_supervised):
            # make images and labels variable
            images = images.cuda()
            labels = labels.squeeze_().cuda()

            # zero gradients for opt
            tgt_opt.zero_grad()

            # compute triplet loss for the target model
            loss = losses.triplet_loss(tgt_model, {"X": images, "y": labels})

            loss_sum += loss.item()

            # optimize target model
            loss.backward()
            tgt_opt.step()

        loss = loss_sum / (step + 1)  # step is zero-based, so average over all batches
        if flag:
            tgt_scheduler.step()
        print("Target Supervised ({}) - Epoch [{}/{}] - loss={:.6f}".format(
            type(tgt_trainloader_supervised).__name__, e,
            exp_dict["tgt_epochs_supervised"], loss))
Example #4
def train_one_epoch(model,
                    optimizer,
                    dataloader,
                    writer,
                    epoch,
                    device,
                    loss_type='bce',
                    write_steps=50):
    model.train()

    for step, ((context, context_len),
               (answer,
                answer_len)) in enumerate(dataloader,
                                          start=epoch * len(dataloader)):
        # print(context.shape, answer.shape)
        optimizer.zero_grad()
        context_embeddings = model(
            context.to(device))  # [batch_size, emb_size]
        answer_embeddings = model(answer.to(device))  # [batch_size, emb_size]

        if loss_type == 'bce':
            loss = bce(context_embeddings, answer_embeddings)
        elif loss_type == 'triplet':
            loss = triplet_loss(context_embeddings, answer_embeddings)
        else:
            raise NotImplementedError('No such loss')

        if step % write_steps == 0:
            print(
                f'Epoch = {epoch}, step = {step}, train_loss = {loss.item()}')
            write_metrics(writer, step, loss.item())
        loss.backward()
        optimizer.step()
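The two-argument triplet_loss(context_embeddings, answer_embeddings) used above passes no explicit negatives; a common choice is to treat the other answers in the batch as negatives. A minimal sketch under that assumption (the function name, margin, and mining strategy are guesses):

import torch
import torch.nn.functional as F

def triplet_loss(context_embeddings, answer_embeddings, margin=1.0):
    # similarity of every context to every answer in the batch; the diagonal
    # holds the matching (positive) pairs, everything else is a negative
    scores = context_embeddings @ answer_embeddings.t()   # [batch, batch]
    positive = scores.diag()
    eye = torch.eye(scores.size(0), dtype=torch.bool, device=scores.device)
    hardest_negative = scores.masked_fill(eye, float('-inf')).max(dim=1).values
    return F.relu(margin - positive + hardest_negative).mean()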
Example #5
def embedding_siamese_1_lstm_1_dense(input_shape):
    """use keras compile"""
    # device = device_lib.list_local_devices()[0].device_type
    embedding_model, triplet_model, outputs = embedding_model_base(input_shape)

    triplet_model.add_loss(K.mean(triplet_loss(outputs, margin=0.15)))
    triplet_model.compile(loss=None, optimizer='adam')

    return embedding_model, triplet_model
Example #6
def embedding_siamese_2_lstm_1_dense_model_compile(input_shape, output_shape,
                                                   margin):
    """use keras compile"""

    embedding_model, triplet_model, outputs = embedding_model_base_2_lstm_1_dense_base(
        input_shape, output_shape)

    triplet_model.add_loss(K.mean(triplet_loss(outputs, margin=margin)))
    triplet_model.compile(loss=None, optimizer='adam')

    return embedding_model, triplet_model
Example #7
def create_opt(net, labels, global_step, decay_steps=30000, learning_rate_decay_factor=0.5, learning_rate=0.01):
    lr = tf.train.exponential_decay(learning_rate,
                                    global_step,
                                    decay_steps,
                                    learning_rate_decay_factor,
                                    staircase=True,
                                    name='exponential_decay_learning_rate')

    features = tf.squeeze(net, axis=[1, 2])
    dml_loss = triplet_loss(features, labels, create_summaries=True) # TODO: use DML loss
    l2_loss = tf.losses.get_regularization_loss()
    loss = dml_loss + l2_loss
    opt = tf.train.GradientDescentOptimizer(learning_rate=lr).minimize(loss, global_step=global_step)
    return opt, loss, lr
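A hedged sketch of wiring create_opt into a TensorFlow 1.x session loop; the toy backbone, input shapes, and label values below are made up, and the triplet_loss used inside create_opt must come from the snippet's own codebase:

import numpy as np
import tensorflow as tf  # TF 1.x assumed, matching the tf.train API above

global_step = tf.train.get_or_create_global_step()
images = tf.placeholder(tf.float32, [None, 64, 64, 3])
labels = tf.placeholder(tf.int64, [None])
# toy backbone producing the [batch, 1, 1, emb_size] feature map create_opt expects
net = tf.layers.conv2d(images, filters=128, kernel_size=64)
opt, loss, lr = create_opt(net, labels, global_step)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    batch_images = np.random.rand(8, 64, 64, 3).astype(np.float32)
    batch_labels = np.random.randint(0, 4, size=8)
    _, loss_value = sess.run([opt, loss],
                             feed_dict={images: batch_images, labels: batch_labels})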
Example #8
def evaluate(model, dataloader, writer, epoch, device, loss_type='bce'):
    contexts = []
    answers = []

    model.eval()
    loss_history = []
    for (context, context_len), (answer, answer_len) in dataloader:
        context_embeddings = model(
            context.to(device))  # [batch_size, emb_size]
        answer_embeddings = model(answer.to(device))  # [batch_size, emb_size]

        if loss_type == 'bce':
            loss = bce(context_embeddings, answer_embeddings)
        elif loss_type == 'triplet':
            loss = triplet_loss(context_embeddings, answer_embeddings)
        else:
            raise NotImplementedError('No such loss')
        loss_history.append(loss.item())

        contexts.append(context_embeddings.cpu().detach().numpy())
        answers.append(answer_embeddings.cpu().detach().numpy())

    loss_value = np.mean(loss_history)

    contexts = np.concatenate(contexts, axis=0)  # handles a final batch of a different size
    answers = np.concatenate(answers, axis=0)

    emb_size = answers.shape[1]
    faiss_index = faiss.IndexFlat(emb_size)

    faiss_index.verbose = True
    faiss_index.add(answers)
    _, indexes = faiss_index.search(contexts, k=100)

    mrr = calculate_mrr(y_true=np.arange(indexes.shape[0]).reshape(-1, 1),
                        preds=indexes)
    write_metrics(writer,
                  epoch * len(dataloader),
                  loss_value,
                  mrr=mrr,
                  prefix='eval')
    print(
        f'Epoch = {epoch}, step = {epoch * len(dataloader)}, eval_loss = {loss_value}, mrr = {mrr}'
    )
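calculate_mrr is referenced but not defined on this page; a plausible implementation consistent with how it is called above, where the correct answer for query i is assumed to sit at index i:

import numpy as np

def calculate_mrr(y_true, preds):
    """Mean reciprocal rank. y_true: [n, 1] correct answer index per query;
    preds: [n, k] retrieved answer indexes, best first."""
    reciprocal_ranks = []
    for true_idx, retrieved in zip(y_true[:, 0], preds):
        hits = np.where(retrieved == true_idx)[0]
        reciprocal_ranks.append(1.0 / (hits[0] + 1) if hits.size else 0.0)
    return float(np.mean(reciprocal_ranks))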
Example #9
def fit_src(model, data_loader, opt):
    loss_sum = 0.
    for step, (images, labels) in enumerate(data_loader):
        # make images and labels variable
        images = images.cuda()
        labels = labels.squeeze_().cuda()

        # zero gradients for opt
        opt.zero_grad()

        # compute triplet loss
        loss = losses.triplet_loss(model, {"X": images, "y": labels})

        loss_sum += loss.item()

        # optimize source classifier
        loss.backward()
        opt.step()

    return {"loss": loss_sum / step}
Example #10
def loss_func(z, y):
    return losses.triplet_loss(z,
                               y,
                               kind=triplet_kind,
                               margin=args.margin)
Example #11
input_shape = (cfg['input_size'], cfg['input_size'], 3)

backend_model = load_model("facenet/facenet_keras.h5")
backend_model.load_weights("facenet/weights/facenet_keras_weights.h5")

A = Input(shape=backend_model.input_shape[1:], name='anchor')
P = Input(shape=backend_model.input_shape[1:], name='anchorPositive')
N = Input(shape=backend_model.input_shape[1:], name='anchorNegative')

enc_A = backend_model(A)
enc_P = backend_model(P)
enc_N = backend_model(N)

# Model
tripletModel = Model([A, P, N], [enc_A, enc_P, enc_N])
tripletModel.compile(optimizer='adam', loss=triplet_loss(cfg['alpha']))

## CallBacks
if not os.path.isdir("checkpoints"):
    os.mkdir("checkpoints")

best_loss_model_name = "{}_{}".format(
    cfg['sub_name'],
    datetime.now().strftime(format="%Y%m%d%H%M"))
best_loss_path = os.path.join("checkpoints", best_loss_model_name)

if not os.path.isdir(best_loss_path):
    os.mkdir(best_loss_path)

check_point_saver_best_loss = ModelCheckpoint(
    os.path.join(best_loss_path, "triplet_face_loss.h5"),
    monitor='loss',
    save_best_only=True)  # remaining arguments are not shown in the source; monitor/save_best_only are assumptions
Example #12
def train_embedding_siamese_Ndiff_train_val_routine(list_feature_fold_train,
                                                    labels_fold_train,
                                                    list_feature_fold_val,
                                                    labels_fold_val,
                                                    batch_size,
                                                    input_shape,
                                                    N_diff,
                                                    margin,
                                                    file_path_model,
                                                    file_path_log,
                                                    patience,
                                                    reverse_anchor=False):

    generator_train = generator_triplet_Ndiff(
        list_feature=list_feature_fold_train,
        labels=labels_fold_train,
        batch_size=1,
        shuffle=True,
        reverse_anchor=reverse_anchor,
        N_diff=N_diff)

    generator_val = generator_triplet_Ndiff(list_feature=list_feature_fold_val,
                                            labels=labels_fold_val,
                                            batch_size=1,
                                            shuffle=True,
                                            reverse_anchor=reverse_anchor,
                                            N_diff=N_diff)

    embedding_model, triplet_model = embedding_siamese_1_lstm_1_dense(
        input_shape)

    # generator steps per pass (doubled when reverse_anchor is enabled)
    factor = 2 * N_diff if reverse_anchor else N_diff
    iter_time_train = len(labels_fold_train) * factor / batch_size
    iter_time_val = len(labels_fold_val) * factor / batch_size

    ii_epoch = 0
    ii_patience = 0
    min_val_loss = np.inf
    ii_counter = 0  # batch counter
    ii_Ndiff = 0  # num of diff sample counter
    max_loss = -np.inf  # max loss during Ndiff iterations
    input_batch_max_loss = None  # input batch with max loss
    for input_batch in generator_train:
        outputs_batch = triplet_model.predict_on_batch(input_batch)
        loss_batch = K.eval(K.mean(triplet_loss(outputs_batch, margin=margin)))
        # print(ii_counter, loss_batch)

        if loss_batch > max_loss:
            max_loss = loss_batch
            input_batch_max_loss = input_batch

        ii_Ndiff += 1
        if ii_Ndiff >= N_diff:
            # print('train on iter', ii_counter, max_loss)
            triplet_model.train_on_batch(input_batch_max_loss, None)
            ii_Ndiff = 0
            max_loss = -np.inf
            input_batch_max_loss = None

        ii_counter += 1
        ii_patience += 1
        if ii_counter >= iter_time_train:
            # train_loss = calculate_loss(triplet_model=triplet_model,
            #                             generator=generator_train,
            #                             iter_time=iter_time_train,
            #                             batch_size=batch_size,
            #                             N_diff=N_diff,
            #                             margin=margin)
            val_loss = calculate_loss(triplet_model=triplet_model,
                                      generator=generator_val,
                                      iter_time=iter_time_val,
                                      batch_size=batch_size,
                                      N_diff=N_diff,
                                      margin=margin)

            writeValLossCsv(file_path_log=file_path_log,
                            ii_epoch=ii_epoch,
                            val_loss=val_loss,
                            train_loss=None)

            if val_loss < min_val_loss:
                min_val_loss = val_loss
                ii_patience = 0
                save_model(triplet_model, filepath=file_path_model)
            elif ii_patience >= patience:
                break

            ii_counter = 0
            ii_epoch += 1
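writeValLossCsv and save_model come from the snippet's own codebase; a minimal sketch of the logging helper, consistent with its keyword arguments (the column order is an assumption):

import csv

def writeValLossCsv(file_path_log, ii_epoch, val_loss, train_loss=None):
    # append one (epoch, train_loss, val_loss) row per validation pass
    with open(file_path_log, 'a', newline='') as f:
        csv.writer(f).writerow([ii_epoch, train_loss, val_loss])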
Example #13
    print("[+] Dataset initialized successfully")

    # load backbones
    print("[*] Initializing weights...")
    imagenet_net = ResNet34()
    sketches_net = ResNet34()
    # sketches_net.load_state_dict(torch.load(args.sketches_backbone_weights))
    print("[+] Weights loaded")

    print("[*] Adapting output layers...")

    print("[*] Initializing model, loss and optimizer")
    siamese_net = SiameseNetwork(sketches_net, imagenet_net)
    siamese_net.to(args.device)
    optimizer = torch.optim.Adam(siamese_net.parameters(), lr=args.lr)
    triplet_loss = triplet_loss()
    cross_entropy_loss = torch.nn.CrossEntropyLoss()
    print("[+] Model, loss and optimizer were initialized successfully")

    if not args.debug:
        wandb.init(project='homework1-cc7221', entity='p137')
        config = wandb.config
        config.model = siamese_net.__class__.__name__ + "_triplet"
        config.device = args.device
        config.batch_size = args.batch_size
        config.epochs = args.epochs
        config.learning_rate = args.lr

    print("[*] Training")
    best_avg_acc = 0
    for epoch in range(args.epochs):