Example 1
import random

import torch
import torch.nn as nn

# calc_train_codes, eval_turn, label2onehot, and calc_map_k are project-local
# helpers assumed to be in scope.
def train_model(model, dataloader, criterion, criterion_hash, optimizer, scheduler, num_epochs, bits, classes, log_file):

    # Precompute the target hash codes for every training item.
    train_codes = calc_train_codes(dataloader, bits, classes)

    for epoch in range(num_epochs):

        model.train()
        ce_loss = 0.0

        for batch_cnt, (inputs, labels, item) in enumerate(dataloader['train']):

            codes = torch.tensor(train_codes[item, :]).float().cuda()
            inputs = inputs.cuda()
            labels = labels.cuda()

            optimizer.zero_grad()
            feature_map, outputs_class, outputs_codes = model(inputs)

            # ------------------------------------------------------------
            # Attention-guided erasing: hide the most salient regions of each
            # image and classify the masked copies as well.
            attention = torch.sum(feature_map.detach(), dim=1, keepdim=True)
            attention = nn.functional.interpolate(attention, size=(224, 224), mode='bilinear', align_corners=True)
            masks = []
            for i in range(labels.size(0)):
                # Mask out pixels whose attention exceeds a random fraction of the maximum.
                threshold = random.uniform(0.9, 1.0)
                mask = (attention[i] < threshold * attention[i].max()).float()
                masks.append(mask)

            masks = torch.stack(masks)
            hide_imgs = inputs * masks
            _, outputs_hide, _ = model(hide_imgs)
            # ------------------------------------------------------------

            loss_class = criterion(outputs_class, labels)
            loss_class_hide = criterion(outputs_hide, labels)
            loss_codes = criterion_hash(outputs_codes, codes)
            loss = loss_class + loss_codes + loss_class_hide  # the hidden-image term can be down-weighted, e.g. 0.1 * loss_class_hide
            loss.backward()
            optimizer.step()
            ce_loss += loss.item() * inputs.size(0)

        epoch_loss = ce_loss / dataloader['train'].total_item_len
        scheduler.step()

        if (epoch + 1) % 1 == 0:  # evaluate every epoch; raise the modulus to evaluate less often
            ground_q, code_q = eval_turn(model, dataloader['val'])
            ground_d, code_d = eval_turn(model, dataloader['base'])

            labels_onehot_q = label2onehot(ground_q.cpu(), classes)
            labels_onehot_d = label2onehot(ground_d.cpu(), classes)

            map_1 = calc_map_k(torch.sign(code_q), torch.tensor(train_codes).float().cuda(), labels_onehot_q, labels_onehot_d)

            log = 'epoch:{}:  loss:{:.4f},  MAP:{:.4f}'.format(epoch + 1, epoch_loss, map_1)
            print(log)
            log_file.write(log + '\n')
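All five examples call a label2onehot helper that is never shown, and its exact
signature clearly varies between repositories (Example 3 feeds it lists of label
lists, Example 4 appears to one-hot a full segmentation map). A minimal sketch
for the simplest case, a 1-D tensor of class indices; the behavior here is an
assumption, not the original implementation:

import torch

def label2onehot(labels, num_classes):
    # Hypothetical sketch: one row per sample, a single 1 in the label's column.
    onehot = torch.zeros(labels.size(0), num_classes)
    onehot[torch.arange(labels.size(0)), labels.long()] = 1.0
    return onehot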
Example 2
    # Running accumulators for the epoch's loss terms.
    d_loss_real_ = 0
    d_loss_cls_ = 0
    d_loss_fake_ = 0
    d_loss_gp_ = 0
    g_loss_fake_ = 0
    g_loss_cls_ = 0
    g_loss_rec_ = 0
    g_loss_seg_ = 0
    s_loss_ = 0
    # Set training mode.
    generator.train()
    for i, (flair, t1, t1ce, t2, seg) in enumerate(train_loader):
        ############################################## discriminator
        ############ real
        # Pick a random target modality per sample (0 = flair, 1 = t1ce, 2 = t1)
        # and assemble the corresponding batch of real images.
        label_ = torch.randint(3, (t1.size(0), ))
        label = label2onehot(label_, 3).cuda()
        real = torch.zeros(t1.size(0), t1.size(1), t1.size(2), t1.size(3))
        for idx, l in enumerate(label_):  # idx: do not shadow the batch index i
            if l == 0:
                real[idx] = flair[idx]
            elif l == 1:
                real[idx] = t1ce[idx]
            elif l == 2:
                real[idx] = t1[idx]
            else:
                print('error: unexpected modality label!')
        out_src, out_cls = discriminator(real.float().cuda(),
                                         t2.float().cuda())
        d_loss_real = -torch.mean(out_src.sum([1, 2, 3]))
        d_loss_cls = classification_loss(out_cls, label)
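classification_loss is not defined in this snippet. Since the target here is a
one-hot modality label, a plausible StarGAN-style sketch is binary cross-entropy
with logits, averaged over the batch; this reconstruction is an assumption:

import torch.nn.functional as F

def classification_loss(logit, target):
    # Hypothetical sketch: BCE-with-logits against the one-hot domain labels.
    return F.binary_cross_entropy_with_logits(logit, target, reduction='sum') / logit.size(0)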
Example 3
import os
from collections import defaultdict

import numpy
import scipy.io
from scipy.io import loadmat
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import f1_score
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import LinearSVC
from sklearn.utils import shuffle as skshuffle

# sparse2graph, label2onehot, TopKRanker, and the mat_file/emb_file/out_file
# globals are assumed to be defined elsewhere in the module.
def score(emb, startfrom0=False, topk=False, k=2, classifier='log'):

    # 1. Files
    #embeddings_file = "blogcatalog.embeddings"
    matfile = mat_file
    embeddings_file = emb_file
    output_file = out_file

    # 2. Load labels
    mat = loadmat(matfile)
    A = mat['network']
    graph = sparse2graph(A)
    labels_matrix = mat['group']

    if startfrom0:
        index_align = 0
    else:
        index_align = 1

    if emb is None:
        # 3. Load embeddings from disk, aligning node ids with matrix rows
        embed = numpy.loadtxt(embeddings_file, skiprows=1)
        features_matrix = numpy.asarray([embed[numpy.where(embed[:, 0] == node + index_align), 1:][0, 0] for node in range(A.shape[0])])
        features_matrix = numpy.reshape(features_matrix, [features_matrix.shape[0], features_matrix.shape[-1]])
    else:
        features_matrix = emb

    shuffles = []
    number_shuffles = k

    for x in range(number_shuffles):
        shuffles.append(skshuffle(features_matrix, labels_matrix))

    # 4. Score each train/test split
    all_results = defaultdict(list)

    all_results_m = []
    training_percents = [0.1, 0.5, 0.9]
    # uncomment for all training percents
    #training_percents = numpy.asarray(range(1,10))*.1
    for train_percent in training_percents:
        for shuf in shuffles:

            X, y = shuf

            training_size = int(train_percent * X.shape[0])

            X_train = X[:training_size, :]
            y_train_ = y[:training_size]

            y_train = [[] for x in range(y_train_.shape[0])]

            cy = y_train_.tocoo()
            for i, j in zip(cy.row, cy.col):
                y_train[i].append(j)

            #mlb = MultiLabelBinarizer()
            #y_train_onehot = mlb.fit_transform(y_train)
            y_train_onehot = label2onehot(y_train, labels_matrix.toarray().shape[1])

            #assert sum(len(l) for l in y_train) == y_train_.nnz

            X_test = X[training_size:, :]
            y_test_ = y[training_size:]

            y_test = [[] for x in range(y_test_.shape[0])]

            cy = y_test_.tocoo()
            for i, j in zip(cy.row, cy.col):
                y_test[i].append(j)

            #y_test_onehot = mlb.fit_transform(y_test)
            y_test_onehot = label2onehot(y_test, labels_matrix.toarray().shape[1])

            if topk:
                if classifier == 'log':
                    clf = TopKRanker(LogisticRegression(max_iter=500,))
                elif classifier == 'svm':
                    clf = TopKRanker(LinearSVC())
            else:
                if classifier == 'log':
                    clf = OneVsRestClassifier(LogisticRegression(max_iter=500,))
                elif classifier == 'svm':
                    clf = OneVsRestClassifier(LinearSVC())

            clf.fit(X_train, y_train_onehot)

            if topk:
                # find out how many labels should be predicted
                top_k_list = [len(l) for l in y_test]
                preds = clf.predict(X_test, top_k_list)
                preds = label2onehot(preds, labels_matrix.toarray().shape[1])
            else:
                preds = clf.predict(X_test)

            results = {}
            averages = ["micro", "macro", "samples", "weighted"]
            for average in averages:
                if (labels_matrix.shape[1] == 1 and average == "samples"):
                    results[average] = 1.0
                else:
                    results[average] = f1_score(y_test_onehot, preds, average=average)

            all_results[train_percent].append(results)
            all_results_m.append(results)

    m_buf = []
    v_buf = []
    mean_results = defaultdict(list)
    for train_percent in sorted(all_results.keys()):
        res_tmp = {}
        m_tmp = []
        v_tmp = []
        m_tmp.append(train_percent)
        for average in averages:
            vals = [all_results[train_percent][j][average] for j in range(k)]
            res_tmp[average] = numpy.average(vals)
            m_tmp.append(numpy.average(vals))
            v_tmp.append(numpy.var(vals))
        mean_results[train_percent].append(res_tmp)
        m_buf.append(m_tmp)
        v_buf.append(v_tmp)

    #if isinstance(os.path.basename(os.path.splitext(embeddings_file)[0]).split('_')[-1], int):
    #    filename = '../result/results_%d.mat' % int(os.path.basename(os.path.splitext(embeddings_file)[0]).split('_')[-1])
    #else:
    #    filename = '../result/results.mat'
    filename = os.path.splitext(output_file)[0]
    filename = '%s_result.mat' % filename
    scipy.io.savemat(filename, mdict={'average': m_buf, 'variance': v_buf, 'origin': all_results_m})

    print('Averaged crossvalidation results, using embeddings of dimensionality', X.shape[1])
    print('-------------------')
    for train_percent in sorted(mean_results.keys()):
        print('Train percent:', train_percent)
        for x in mean_results[train_percent]:
            print(x)
        print('-------------------')
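TopKRanker is also undefined here. In the DeepWalk scoring script that this
function closely follows, it is an OneVsRestClassifier subclass whose predict
keeps, for sample i, only the top_k_list[i] most probable labels. A sketch in
that spirit (an assumption, not necessarily the original class):

import numpy
from sklearn.multiclass import OneVsRestClassifier

class TopKRanker(OneVsRestClassifier):
    def predict(self, X, top_k_list):
        # For each sample, keep the k labels with the highest probability,
        # where k is the number of true labels for that sample.
        probs = numpy.asarray(super(TopKRanker, self).predict_proba(X))
        all_labels = []
        for i, k in enumerate(top_k_list):
            all_labels.append(self.classes_[probs[i, :].argsort()[-k:]].tolist())
        return all_labels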
Example 4
    def train(self):
        all_train_iter_total_loss = []
        all_train_iter_corr_loss = []
        all_train_iter_recover_loss = []
        all_train_iter_change_loss = []
        all_train_iter_gan_loss_gen = []
        all_train_iter_gan_loss_dis = []
        all_val_epo_iou = []
        all_val_epo_acc = []
        iter_num = [0]
        epoch_num = []
        num_batches = len(self.train_dataloader)

        for epoch_i in range(self.start_epoch + 1, self.n_epoch):
            iter_total_loss = AverageTracker()
            iter_corr_loss = AverageTracker()
            iter_recover_loss = AverageTracker()
            iter_change_loss = AverageTracker()
            iter_gan_loss_gen = AverageTracker()
            iter_gan_loss_dis = AverageTracker()
            batch_time = AverageTracker()
            tic = time.time()

            # train
            self.OldLabel_generator.train()
            self.Image_generator.train()
            self.discriminator.train()
            for i, meta in enumerate(self.train_dataloader):

                image, old_label, new_label = meta[0].cuda(), meta[1].cuda(), meta[2].cuda()
                recover_pred, feats = self.OldLabel_generator(
                    label2onehot(old_label, self.cfg.DATASET.N_CLASS))
                corr_pred = self.Image_generator(image, feats)

                # -------------------
                # Train Discriminator
                # -------------------
                self.discriminator.set_requires_grad(True)
                self.optimizer_D.zero_grad()

                fake_sample = torch.cat((image, corr_pred), 1).detach()
                real_sample = torch.cat(
                    (image, label2onehot(new_label, self.cfg.DATASET.N_CLASS)), 1)

                score_fake_d = self.discriminator(fake_sample)
                score_real = self.discriminator(real_sample)

                gan_loss_dis = self.criterion_D(pred_score=score_fake_d,
                                                real_score=score_real)
                gan_loss_dis.backward()
                self.optimizer_D.step()
                self.scheduler_D.step()

                # ---------------
                # Train Generator
                # ---------------
                self.discriminator.set_requires_grad(False)
                self.optimizer_G.zero_grad()

                score_fake = self.discriminator(
                    torch.cat((image, corr_pred), 1))

                total_loss, corr_loss, recover_loss, change_loss, gan_loss_gen = self.criterion_G(
                    corr_pred, recover_pred, score_fake, old_label, new_label)

                total_loss.backward()
                self.optimizer_G.step()
                self.scheduler_G.step()

                iter_total_loss.update(total_loss.item())
                iter_corr_loss.update(corr_loss.item())
                iter_recover_loss.update(recover_loss.item())
                iter_change_loss.update(change_loss.item())
                iter_gan_loss_gen.update(gan_loss_gen.item())
                iter_gan_loss_dis.update(gan_loss_dis.item())
                batch_time.update(time.time() - tic)
                tic = time.time()

                log = '{}: Epoch: [{}][{}/{}], Time: {:.2f}, ' \
                      'Total Loss: {:.6f}, Corr Loss: {:.6f}, Recover Loss: {:.6f}, Change Loss: {:.6f}, GAN_G Loss: {:.6f}, GAN_D Loss: {:.6f}'.format(
                    datetime.now(), epoch_i, i, num_batches, batch_time.avg,
                    total_loss.item(), corr_loss.item(), recover_loss.item(), change_loss.item(), gan_loss_gen.item(), gan_loss_dis.item())
                print(log)

                if (i + 1) % 10 == 0:
                    all_train_iter_total_loss.append(iter_total_loss.avg)
                    all_train_iter_corr_loss.append(iter_corr_loss.avg)
                    all_train_iter_recover_loss.append(iter_recover_loss.avg)
                    all_train_iter_change_loss.append(iter_change_loss.avg)
                    all_train_iter_gan_loss_gen.append(iter_gan_loss_gen.avg)
                    all_train_iter_gan_loss_dis.append(iter_gan_loss_dis.avg)
                    iter_total_loss.reset()
                    iter_corr_loss.reset()
                    iter_recover_loss.reset()
                    iter_change_loss.reset()
                    iter_gan_loss_gen.reset()
                    iter_gan_loss_dis.reset()

                    vis.line(X=np.column_stack(
                        np.repeat(np.expand_dims(iter_num, 0), 6, axis=0)),
                             Y=np.column_stack((all_train_iter_total_loss,
                                                all_train_iter_corr_loss,
                                                all_train_iter_recover_loss,
                                                all_train_iter_change_loss,
                                                all_train_iter_gan_loss_gen,
                                                all_train_iter_gan_loss_dis)),
                             opts={
                                 'legend': [
                                     'total_loss', 'corr_loss', 'recover_loss',
                                     'change_loss', 'gan_loss_gen',
                                     'gan_loss_dis'
                                 ],
                                 'linecolor':
                                 np.array([[255, 0, 0], [0, 255, 0],
                                           [0, 0, 255], [255, 255, 0],
                                           [0, 255, 255], [255, 0, 255]]),
                                 'title':
                                 'Train loss of generator and discriminator'
                             },
                             win='Train loss of generator and discriminator')
                    iter_num.append(iter_num[-1] + 1)

            # eval
            self.OldLabel_generator.eval()
            self.Image_generator.eval()
            self.discriminator.eval()
            with torch.no_grad():
                for j, meta in enumerate(self.valid_dataloader):
                    image, old_label, new_label = meta[0].cuda(), meta[1].cuda(), meta[2].cuda()
                    recover_pred, feats = self.OldLabel_generator(
                        label2onehot(old_label, self.cfg.DATASET.N_CLASS))
                    corr_pred = self.Image_generator(image, feats)
                    preds = np.argmax(corr_pred.cpu().detach().numpy().copy(),
                                      axis=1)
                    target = new_label.cpu().detach().numpy().copy()
                    self.running_metrics.update(target, preds)

                    if j == 0:
                        color_map1 = gen_color_map(preds[0, :]).astype(np.uint8)
                        color_map2 = gen_color_map(preds[1, :]).astype(np.uint8)
                        color_map = cv2.hconcat([color_map1, color_map2])
                        cv2.imwrite(
                            os.path.join(
                                self.val_outdir, '{}epoch*{}*{}.png'.format(
                                    epoch_i, meta[3][0], meta[3][1])),
                            color_map)

            score = self.running_metrics.get_scores()
            oa = score['Overall Acc: \t']
            precision = score['Precision: \t'][1]
            recall = score['Recall: \t'][1]
            iou = score['Class IoU: \t'][1]
            miou = score['Mean IoU: \t']
            self.running_metrics.reset()

            epoch_num.append(epoch_i)
            all_val_epo_acc.append(oa)
            all_val_epo_iou.append(miou)
            vis.line(X=np.column_stack(
                np.repeat(np.expand_dims(epoch_num, 0), 2, axis=0)),
                     Y=np.column_stack((all_val_epo_acc, all_val_epo_iou)),
                     opts={
                         'legend':
                         ['val epoch Overall Acc', 'val epoch Mean IoU'],
                         'linecolor': np.array([[255, 0, 0], [0, 255, 0]]),
                         'title': 'Validate Accuracy and IoU'
                     },
                     win='validate Accuracy and IoU')

            log = '{}: Epoch Val: [{}], ACC: {:.2f}, Recall: {:.2f}, mIoU: {:.4f}' \
                .format(datetime.now(), epoch_i, oa, recall, miou)
            self.logger.info(log)

            state = {
                'epoch': epoch_i,
                "acc": oa,
                "recall": recall,
                "iou": miou,
                'model_G_N': self.OldLabel_generator.state_dict(),
                'model_G_I': self.Image_generator.state_dict(),
                'model_D': self.discriminator.state_dict(),
                'optimizer_G': self.optimizer_G.state_dict(),
                'optimizer_D': self.optimizer_D.state_dict()
            }
            save_path = os.path.join(self.cfg.TRAIN.OUTDIR, 'checkpoints',
                                     '{}epoch.pth'.format(epoch_i))
            torch.save(state, save_path)
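AverageTracker is not shown in the snippet; from its usage (update, avg, reset)
it behaves like the common AverageMeter pattern. A minimal sketch, hypothetical
here:

class AverageTracker:
    def __init__(self):
        self.reset()

    def reset(self):
        # Clear the running statistics between logging windows.
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, value, n=1):
        # Accumulate a new observation and refresh the running mean.
        self.sum += value * n
        self.count += n
        self.avg = self.sum / self.count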
Example 5
import torch
from torch.utils import data

# Unet, dice_loss, label2onehot, train_data, and batch_size are assumed to be
# defined earlier in the script.
train_loader = data.DataLoader(dataset=train_data,
                               batch_size=batch_size,
                               shuffle=True,
                               drop_last=True,
                               num_workers=4)
unet = Unet()
optimizer = torch.optim.Adam(unet.parameters(), lr=0.0001)
unet.cuda()
unet.train()
EPOCH = 30
for epoch in range(EPOCH):
    batch_score = 0
    num_batch = 0
    for i, (flair, t1, t1ce, t2, label) in enumerate(train_loader):
        # Pick a random source modality per sample (0 = flair, 1 = t1ce, 2 = t1)
        # and condition the U-Net on its one-hot code.
        info_c_ = torch.randint(3, (t1.size(0), ))
        info_c = label2onehot(info_c_, 3).cuda()
        img = torch.zeros(t1.size(0), t1.size(1), t1.size(2), t1.size(3))
        for idx, l in enumerate(info_c_):  # idx: do not shadow the batch index i
            if l == 0:
                img[idx] = flair[idx]
            elif l == 1:
                img[idx] = t1ce[idx]
            elif l == 2:
                img[idx] = t1[idx]
        seg = unet(img.float().cuda(), info_c)

        loss = dice_loss(seg, label.float().cuda())
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        seg = seg.cpu()
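dice_loss is not defined in this snippet either. A common soft-Dice formulation
for binary segmentation maps, offered as a hypothetical sketch:

import torch

def dice_loss(pred, target, eps=1e-6):
    # Hypothetical sketch: 1 - soft Dice coefficient, averaged over the batch.
    pred = pred.reshape(pred.size(0), -1)
    target = target.reshape(target.size(0), -1)
    intersection = (pred * target).sum(dim=1)
    union = pred.sum(dim=1) + target.sum(dim=1)
    return (1.0 - (2.0 * intersection + eps) / (union + eps)).mean()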