Example #1
def main():
    args = parse_train_args()

    set_seed(manualSeed=args.seed)

    if args.optimizer == 'LBFGS':
        sys.exit('Only first-order optimizers are supported for training!')

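    # use the GPU given by args.gpu_id when CUDA is available, otherwise fall back to CPU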
    device = torch.device(
        "cuda:" + str(args.gpu_id) if torch.cuda.is_available() else "cpu")
    args.device = device

    trainloader, _, num_classes = make_dataset(args.dataset,
                                               args.data_dir,
                                               args.batch_size,
                                               args.sample_size,
                                               SOTA=args.SOTA)

    if args.model == "MLP":
        model = models.__dict__[args.model](hidden=args.width,
                                            depth=args.depth,
                                            fc_bias=args.bias,
                                            num_classes=num_classes).to(device)
    elif args.model == "ResNet18_adapt":
        model = ResNet18_adapt(width=args.width,
                               num_classes=num_classes,
                               fc_bias=args.bias).to(device)
    else:
        model = models.__dict__[args.model](num_classes=num_classes,
                                            fc_bias=args.bias,
                                            ETF_fc=args.ETF_fc,
                                            fixdim=args.fixdim,
                                            SOTA=args.SOTA).to(device)

    train(args, model, trainloader)
Example #2
def main(args):
    train_ds, valid_ds, datainfo = datasets.make_dataset(args.dataset_path)

    # NOTE: `in_channels` is not defined anywhere in this snippet; it is assumed
    # here to come from the dataset metadata returned by make_dataset.
    in_channels = datainfo['in_channels']  # assumed key; adjust to the actual datainfo structure
    model = models.BaselineCNN1d(in_channels, 3)
    criterion = nn.MSELoss()
    if args.gpus > 0:
        model = model.cuda()
        criterion = criterion.cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    metrics = get_metrics(criterion, datainfo)

    callbacks = [EarlyStopping(metrics[0])]
    trainer = Trainer2(model,
                       optim=optimizer,
                       metrics=metrics,
                       callbacks=callbacks,
                       data_parallel=False,
                       ncols=100,
                       cpus=args.cpus,
                       gpus=args.gpus)
    trainer.fit(train_ds,
                valid_ds,
                start_epoch=args.start_epoch,
                num_epochs=args.num_epochs,
                batch_size=args.batch_size,
                shuffle=False)
Example #3
def main(args):
    train_ds, valid_ds, datainfo = datasets.make_dataset(args.dataset_path)

    model = models.RNNBaseline(input_size=6, hidden_size=12, num_layers=3)
    criterion = nn.MSELoss()
    if args.gpus > 0:
        model = model.cuda()
        criterion = criterion.cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    head_metric = utils.HeadProjectMetric('head')
    metrics = [ModuleMetric(criterion, 'loss'), head_metric]

    callbacks = [EarlyStopping(metrics)]
    trainer = Trainer2(model, optimizer, metrics, callbacks, ncols=100)
    trainer.fit(train_ds,
                valid_ds,
                start_epoch=args.start_epoch,
                num_epochs=args.num_epochs,
                batch_size=args.batch_size,
                shuffle=True)

    # Save the metric results to a CSV file
    csv_filename = args.checkpoint_dir / 'metrics.csv'
    print('Write metric result into', csv_filename)
    head_metric.to_csv(csv_filename)
Example #4
    def init_dataset_iterator_local(self,
                                    features_config,
                                    batch_size=128,
                                    truncate_session_length=20):
        with tf.device('/cpu:0'):
            self.files_placeholder = tf.placeholder(tf.string)

            # Make a dataset
            ds = make_dataset(self.files_placeholder,
                              features_config,
                              batch_size=batch_size,
                              truncate_sequence_length=truncate_session_length)

            # Define an abstract iterator that has the shape and type of our datasets
            iterator = tf.data.Iterator.from_structure(ds.output_types,
                                                       ds.output_shapes)

            # This is an op that gets the next element from the iterator
            self.next_element_op = iterator.get_next()

            # These ops let us switch and reinitialize every time we finish an epoch
            self.iterator_init_op = iterator.make_initializer(ds)
Example #5
def main(args):
    train_ds, valid_ds, datainfo = datasets.make_dataset(args.dataset_path)

    model = models.SimpleError()
    criterion = nn.MSELoss()
    if args.gpus > 0:
        model = model.cuda()
        criterion = criterion.cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    metrics = [
        ModuleMetric(criterion, 'loss'),
        utils.HeadProjectMetric('head')
    ]

    callbacks = [EarlyStopping(metrics)]
    trainer = Trainer2(model, optimizer, metrics, callbacks, ncols=100)
    trainer.fit(train_ds,
                valid_ds,
                start_epoch=args.start_epoch,
                num_epochs=args.num_epochs,
                batch_size=args.batch_size,
                shuffle=True)
Example #6
def main():
    args = parse_train_args()

    if args.optimizer != 'LBFGS':
        sys.exit('Support for training with LBFGS only!')

    device = torch.device(
        "cuda:" + str(args.gpu_id) if torch.cuda.is_available() else "cpu")
    args.device = device

    trainloader, _, num_classes = make_dataset(args.dataset,
                                               args.data_dir,
                                               args.batch_size,
                                               args.sample_size,
                                               SOTA=args.SOTA)

    model = models.__dict__[args.model](num_classes=num_classes,
                                        fc_bias=args.bias,
                                        ETF_fc=args.ETF_fc,
                                        fixdim=args.fixdim,
                                        SOTA=args.SOTA).to(device)
    print('# of model parameters: ' + str(count_network_parameters(model)))

    train(args, model, trainloader)
Example #7
def main():
    args = parse_args()
    config_path = args.config_file_path

    config = get_config(config_path, new_keys_allowed=True)

    config.defrost()
    config.experiment_dir = os.path.join(config.log_dir, config.experiment_name)
    config.tb_dir = os.path.join(config.experiment_dir, 'tb')
    config.model.best_checkpoint_path = os.path.join(config.experiment_dir, 'best_checkpoint.pt')
    config.model.last_checkpoint_path = os.path.join(config.experiment_dir, 'last_checkpoint.pt')
    config.config_save_path = os.path.join(config.experiment_dir, 'segmentation_config.yaml')
    config.freeze()

    init_experiment(config)
    set_random_seed(config.seed)

    train_dataset = make_dataset(config.train.dataset)
    train_loader = make_data_loader(config.train.loader, train_dataset)

    val_dataset = make_dataset(config.val.dataset)
    val_loader = make_data_loader(config.val.loader, val_dataset)

    device = torch.device(config.device)
    model = make_model(config.model).to(device)

    optimizer = make_optimizer(config.optim, model.parameters())
    scheduler = None

    loss_f = make_loss(config.loss)

    early_stopping = EarlyStopping(
        **config.stopper.params
    )

    train_writer = SummaryWriter(log_dir=os.path.join(config.tb_dir, 'train'))
    val_writer = SummaryWriter(log_dir=os.path.join(config.tb_dir, 'val'))

    for epoch in range(1, config.epochs + 1):
        print(f'Epoch {epoch}')
        train_metrics = train(model, optimizer, train_loader, loss_f, device)
        write_metrics(epoch, train_metrics, train_writer)
        print_metrics('Train', train_metrics)

        val_metrics = val(model, val_loader, loss_f, device)
        write_metrics(epoch, val_metrics, val_writer)
        print_metrics('Val', val_metrics)

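        # early_stopping tracks validation loss; counter == 0 means this epoch set a new best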
        early_stopping(val_metrics['loss'])
        if config.model.save and early_stopping.counter == 0:
            torch.save(model.state_dict(), config.model.best_checkpoint_path)
            print('Saved best model checkpoint to disk.')
        if early_stopping.early_stop:
            print(f'Early stopping after {epoch} epochs.')
            break

        if scheduler:
            scheduler.step()

    train_writer.close()
    val_writer.close()

    if config.model.save:
        torch.save(model.state_dict(), config.model.last_checkpoint_path)
        print('Saved last model checkpoint to disk.')
Example #8
def regen_dataset(model, in_data_dir, in_gt_dir, out_data_dir, out_gt_dir,
                  sample_per_file, size_input, transform_joint, transform_img,
                  transform_target, transform_reverse, test_batch_size,
                  max_test_per_file, prec_th, rec_th):
    import scipy.misc
    gpu_time = AverageMeter()
    data_time = AverageMeter()
    write_time = AverageMeter()

    file_list = datasets.make_dataset(in_data_dir, in_gt_dir, do_copy=False)

    rm_old_mk_new_dir(out_data_dir)
    rm_old_mk_new_dir(out_gt_dir)

    model.eval()
    # switch to evaluate mode
    model.cuda()

    time_start = time.time()
    count = 0
    # count processed files

    for count_files, file_path in enumerate(file_list):
        sample_saved, sample_tested = 0, 0
        img = datasets.default_loader(file_path[0], is_target=False)
        gt = datasets.default_loader(file_path[1], is_target=True)

        while sample_tested < max_test_per_file:
            input_, target = None, None
            input_, target = transform_joint(img, gt)
            input_, target = transform_img(input_), transform_target(target)
            input_, target = input_.unsqueeze(0), target.unsqueeze(0)
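            # grow a test batch by re-sampling random joint transforms of the same image/gt pair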
            for b in range(test_batch_size):
                tmp_input, tmp_target = transform_joint(img, gt)
                tmp_input, tmp_target = transform_img(
                    tmp_input), transform_target(tmp_target)
                tmp_input = tmp_input.unsqueeze(0)
                input_ = torch.cat((input_, tmp_input), dim=0)
                target = torch.cat((target, tmp_target), dim=0)

            sample_tested += test_batch_size

            input_var = torch.autograd.Variable(input_, volatile=True)
            target = target.long()

            data_time.update(time.time() - time_start)
            # data loading time

            time_start = time.time()
            # time reset
            # compute output
            output = model(input_var)
            gpu_time.update(time.time() - time_start)
            # computation time
            time_start = time.time()
            # time reset

            prec_batch, recall_batch = get_prec_recall_batch(
                output.data.cpu(), target)

            # keep the hard samples: both precision and recall below threshold (binary mask)
            ind_to_save = (prec_batch.lt(prec_th) * recall_batch.lt(rec_th)).eq(1)
            for i in range(ind_to_save.size(0)):
                if ind_to_save[i] == 0:
                    continue

                out_img = transform_reverse(input_[i, :, :, :]).numpy()
                out_img = np.around(out_img * 255).astype(np.uint8)
                out_img = np.transpose(out_img, (1, 2, 0))
                out_gt = (target[i, :, :].numpy() * 255).astype(np.uint8)

                count += 1
                fname = str(count) + ".png"
                scipy.misc.imsave(os.path.join(out_data_dir, fname), out_img)
                scipy.misc.imsave(os.path.join(out_gt_dir, fname), out_gt)

            write_time.update(time.time() - time_start)
            # data loading time
            time_start = time.time()
            # time reset

            print('File (Tested): [{0}({1})]\t'
                  'Time {gpu_time.val:.3f} ({gpu_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Write {write_time.val:.3f} ({write_time.avg:.3f})'.format(
                      count_files + 1,
                      sample_tested,
                      gpu_time=gpu_time,
                      data_time=data_time,
                      write_time=write_time))
Example #9
from utils.filereader import read_inputs
from datasets import make_dataset
from roaddecider import Decider, DeciderManager  # DeciderManager assumed to be defined alongside Decider

samples1 = read_inputs(
    '/home/wifi/PycharmProjects/data-extractor/data_extractor/service/examples',
    'output.csv')
samples2 = read_inputs(
    '/home/wifi/PycharmProjects/data-extractor/data_extractor/service/examples',
    'output2.csv')

decider1 = Decider(inputs=samples1, name='rsu1')
decider2 = Decider(inputs=samples2, name='rsu2')

trainsamples, traintargets = make_dataset()
decider1.clf.fit(trainsamples, traintargets)
decider2.clf.fit(trainsamples, traintargets)

rtargets1 = decider1.verify_road()
rtargets2 = decider2.verify_road()

decider_manager = DeciderManager()
best = decider_manager.compare([decider1, decider2])
print('best route [overall]: %s' % best.name)
best_per_hour = decider_manager.compare([decider1, decider2], hour=16)
print('best route [hour %d]: %s' % (16, best_per_hour.name))

print(decider1.eval(16))
print(decider2.eval(16))
Example #10
#exportsvg(svgDir, exportDir, exportFormat)


def Erosion(img, kernelSize=2, mode=1):
    #img = GetImg(imgPath, mode)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                       (kernelSize, kernelSize))
    #ret,erosion = cv2.threshold(img,,255,cv2.THRESH_BINARY)
    erosion = cv2.erode(img, kernel, iterations=1)
    #_, erosion = cv2.threshold(erosion,127,255,cv2.THRESH_BINARY)
    return erosion


if __name__ == '__main__':
    #originlist = make_dataset('../../../data/manga/modifysvg')
    svglist = make_dataset('../../../data/manga/png')
    #print(len(svglist), len(originlist))
    '''print('stroke')
	for i in range(len(originlist)):
		change_width(originlist[i])'''
    print('export')
    #os.mkdir('../../../data/manga/png1')
    #exportsvg('../../../data/manga/modifysvg', '../../../data/manga/png1')
    #os.mkdir('../../../data/manga/train/lineA/')
    #svglist = make_dataset('../../../data/manga/png1')
    print('resize')
    for i in range(len(svglist)):
        if i % 100 == 0:
            print(svglist[i])
        if i % 17 == 0:
            outf = '../../../data/manga/test/lineA/'
Example #11
def main():

    torch.backends.cudnn.deterministic = True
    cudnn.benchmark = True
    #parser = argparse.ArgumentParser(description="ReID Baseline Training")
    #parser.add_argument(
    #"--config_file", default="", help="path to config file", type=str)

    #parser.add_argument("opts", help="Modify config options using the command-line", default=None, nargs=argparse.REMAINDER)

    #args = parser.parse_args()
    config_file = 'configs/baseline_veri_r101_a.yml'
    if config_file != "":
        cfg.merge_from_file(config_file)
    #cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)

    logger = setup_logger("reid_baseline", output_dir, if_train=True)
    logger.info("Saving model in the path :{}".format(cfg.OUTPUT_DIR))
    logger.info(config_file)

    if config_file != "":
        logger.info("Loaded configuration file {}".format(config_file))
        with open(config_file, 'r') as cf:
            config_str = "\n" + cf.read()
            logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID

    path = 'D:/Python_SMU/Veri/verigms/gms/'
    pkl = {}
    entries = os.listdir(path)
    for name in entries:
        f = open((path + name), 'rb')
        if name == 'featureMatrix.pkl':
            s = name[0:13]
        else:
            s = name[0:3]
        pkl[s] = pickle.load(f)
        f.close()

    with open('cids.pkl', 'rb') as handle:
        b = pickle.load(handle)

    with open('index.pkl', 'rb') as handle:
        c = pickle.load(handle)

    train_transforms, val_transforms, dataset, train_set, val_set = make_dataset(
        cfg, pkl_file='index.pkl')

    num_workers = cfg.DATALOADER.NUM_WORKERS
    num_classes = dataset.num_train_pids
    #pkl_f = 'index.pkl'
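    # map the 3-character folder id embedded in each image filename to its training pid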
    pid = 0
    pidx = {}
    for img_path, pid, _, _ in dataset.train:
        path = img_path.split('\\')[-1]
        folder = path[1:4]
        pidx[folder] = pid
        pid += 1

    if 'triplet' in cfg.DATALOADER.SAMPLER:
        train_loader = DataLoader(train_set,
                                  batch_size=cfg.SOLVER.IMS_PER_BATCH,
                                  sampler=RandomIdentitySampler(
                                      dataset.train, cfg.SOLVER.IMS_PER_BATCH,
                                      cfg.DATALOADER.NUM_INSTANCE),
                                  num_workers=num_workers,
                                  pin_memory=True,
                                  collate_fn=train_collate_fn)
    elif cfg.DATALOADER.SAMPLER == 'softmax':
        print('using softmax sampler')
        train_loader = DataLoader(train_set,
                                  batch_size=cfg.SOLVER.IMS_PER_BATCH,
                                  shuffle=True,
                                  num_workers=num_workers,
                                  pin_memory=True,
                                  collate_fn=train_collate_fn)
    else:
        print('unsupported sampler! expected softmax or triplet but got {}'.
              format(cfg.DATALOADER.SAMPLER))

    print("train loader loaded successfully")

    val_loader = DataLoader(val_set,
                            batch_size=cfg.TEST.IMS_PER_BATCH,
                            shuffle=False,
                            num_workers=num_workers,
                            pin_memory=True,
                            collate_fn=train_collate_fn)
    print("val loader loaded successfully")

    if cfg.MODEL.PRETRAIN_CHOICE == 'finetune':
        model = make_model(cfg, num_class=576)
        model.load_param_finetune(cfg.MODEL.PRETRAIN_PATH)
        print('Loading pretrained model for finetuning......')
    else:
        model = make_model(cfg, num_class=num_classes)

    loss_func, center_criterion = make_loss(cfg, num_classes=num_classes)

    optimizer, optimizer_center = make_optimizer(cfg, model, center_criterion)
    scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS,
                                  cfg.SOLVER.GAMMA, cfg.SOLVER.WARMUP_FACTOR,
                                  cfg.SOLVER.WARMUP_EPOCHS,
                                  cfg.SOLVER.WARMUP_METHOD)

    print("model,optimizer, loss, scheduler loaded successfully")

    height, width = cfg.INPUT.SIZE_TRAIN

    log_period = cfg.SOLVER.LOG_PERIOD
    checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD
    eval_period = cfg.SOLVER.EVAL_PERIOD

    device = "cuda"
    epochs = cfg.SOLVER.MAX_EPOCHS

    logger = logging.getLogger("reid_baseline.train")
    logger.info('start training')

    if device:
        if torch.cuda.device_count() > 1:
            print('Using {} GPUs for training'.format(
                torch.cuda.device_count()))
            model = nn.DataParallel(model)
        model.to(device)

    loss_meter = AverageMeter()
    acc_meter = AverageMeter()

    evaluator = R1_mAP_eval(len(dataset.query),
                            max_rank=50,
                            feat_norm=cfg.TEST.FEAT_NORM)
    model.base._freeze_stages()
    logger.info('Freezing the stages number:{}'.format(cfg.MODEL.FROZEN))

    data_index = search(pkl)
    print("Ready for training")

    for epoch in range(1, epochs + 1):
        start_time = time.time()
        loss_meter.reset()
        acc_meter.reset()
        evaluator.reset()
        scheduler.step()
        model.train()
        for n_iter, (img, label, index, pid, cid) in enumerate(train_loader):
            optimizer.zero_grad()
            optimizer_center.zero_grad()
            #img = img.to(device)
            #target = vid.to(device)
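            # assemble an anchor/positive/negative batch: slots [0, B) hold anchors,
            # [B, 2B) positives and [2B, 3B) negatives, with B = train_loader.batch_size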
            trainX, trainY = torch.zeros(
                (train_loader.batch_size * 3, 3, height, width),
                dtype=torch.float32), torch.zeros(
                    (train_loader.batch_size * 3), dtype=torch.int64)

            for i in range(train_loader.batch_size):
                labelx = label[i]
                indexx = index[i]
                cidx = pid[i]
                if indexx > len(pkl[labelx]) - 1:
                    indexx = len(pkl[labelx]) - 1

                a = pkl[labelx][indexx]
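                # the smallest non-zero entry in this row selects the positive sample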
                minpos = np.argmin(ma.masked_where(a == 0, a))
                pos_dic = train_set[data_index[cidx][1] + minpos]
                #print(pos_dic[1])
                neg_label = int(labelx)

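                # keep drawing random identities until one differs from the anchor and has a data folder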
                while True:
                    neg_label = random.choice(range(1, 770))
                    if neg_label != int(labelx) and os.path.isdir(
                            os.path.join('D:/datasets/veri-split/train',
                                         strint(neg_label))):
                        break

                negative_label = strint(neg_label)
                neg_cid = pidx[negative_label]
                neg_index = random.choice(range(0, len(pkl[negative_label])))

                neg_dic = train_set[data_index[neg_cid][1] + neg_index]
                trainX[i] = img[i]
                trainX[i + train_loader.batch_size] = pos_dic[0]
                trainX[i + (train_loader.batch_size * 2)] = neg_dic[0]
                trainY[i] = cidx
                trainY[i + train_loader.batch_size] = pos_dic[3]
                trainY[i + (train_loader.batch_size * 2)] = neg_dic[3]

            #print(trainY)
            trainX = trainX.cuda()
            trainY = trainY.cuda()

            score, feat = model(trainX, trainY)
            loss = loss_func(score, feat, trainY)
            loss.backward()
            optimizer.step()
            if 'center' in cfg.MODEL.METRIC_LOSS_TYPE:
                for param in center_criterion.parameters():
                    param.grad.data *= (1. / cfg.SOLVER.CENTER_LOSS_WEIGHT)
                optimizer_center.step()

            acc = (score.max(1)[1] == trainY).float().mean()
            loss_meter.update(loss.item(), img.shape[0])
            acc_meter.update(acc, 1)

            if (n_iter + 1) % log_period == 0:
                logger.info(
                    "Epoch[{}] Iteration[{}/{}] Loss: {:.3f}, Acc: {:.3f}, Base Lr: {:.2e}"
                    .format(epoch, (n_iter + 1), len(train_loader),
                            loss_meter.avg, acc_meter.avg,
                            scheduler.get_lr()[0]))
        end_time = time.time()
        time_per_batch = (end_time - start_time) / (n_iter + 1)
        logger.info(
            "Epoch {} done. Time per batch: {:.3f}[s] Speed: {:.1f}[samples/s]"
            .format(epoch, time_per_batch,
                    train_loader.batch_size / time_per_batch))

        if epoch % checkpoint_period == 0:
            torch.save(
                model.state_dict(),
                os.path.join(cfg.OUTPUT_DIR,
                             cfg.MODEL.NAME + '_{}.pth'.format(epoch)))

        if epoch % eval_period == 0:
            model.eval()
            for n_iter, (img, vid, camid, _, _) in enumerate(val_loader):
                with torch.no_grad():
                    img = img.to(device)
                    feat = model(img)
                    evaluator.update((feat, vid, camid))

            cmc, mAP, _, _, _, _, _ = evaluator.compute()
            logger.info("Validation Results - Epoch: {}".format(epoch))
            logger.info("mAP: {:.1%}".format(mAP))
            for r in [1, 5, 10]:
                logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(
                    r, cmc[r - 1]))
Example #12
def main():
    args = parse_eval_args()

    if args.load_path is None:
        sys.exit('Need to input the path to a pre-trained model!')

    device = torch.device(
        "cuda:" + str(args.gpu_id) if torch.cuda.is_available() else "cpu")
    args.device = device

    trainloader, testloader, num_classes = make_dataset(
        args.dataset, args.data_dir, args.batch_size, args.sample_size)

    if args.model == "MLP":
        model = models.__dict__[args.model](hidden=args.width,
                                            depth=args.depth,
                                            fc_bias=args.bias,
                                            num_classes=num_classes).to(device)
    elif args.model == "ResNet18_adapt":
        model = ResNet18_adapt(width=args.width,
                               num_classes=num_classes,
                               fc_bias=args.bias).to(device)
    else:
        model = models.__dict__[args.model](num_classes=num_classes,
                                            fc_bias=args.bias,
                                            ETF_fc=args.ETF_fc,
                                            fixdim=args.fixdim,
                                            SOTA=args.SOTA).to(device)

    fc_features = FCFeatures()
    model.fc.register_forward_pre_hook(fc_features)

    info_dict = {
        'collapse_metric': [],
        'ETF_metric': [],
        'WH_relation_metric': [],
        'Wh_b_relation_metric': [],
        'W': [],
        'b': [],
        'H': [],
        'mu_G_train': [],
        # 'mu_G_test': [],
        'train_acc1': [],
        'train_acc5': [],
        'test_acc1': [],
        'test_acc5': []
    }

    logfile = open('%s/test_log.txt' % (args.load_path), 'w')
    for i in range(args.epochs):

        model.load_state_dict(
            torch.load(args.load_path + 'epoch_' + str(i + 1).zfill(3) +
                       '.pth'))
        model.eval()

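        # grab the classifier weights W and bias b from the final fc layer for the metrics below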
        for n, p in model.named_parameters():
            if 'fc.weight' in n:
                W = p
            if 'fc.bias' in n:
                b = p

        mu_G_train, mu_c_dict_train, train_acc1, train_acc5 = compute_info(
            args, model, fc_features, trainloader, isTrain=True)
        mu_G_test, mu_c_dict_test, test_acc1, test_acc5 = compute_info(
            args, model, fc_features, testloader, isTrain=False)

        Sigma_W = compute_Sigma_W(args,
                                  model,
                                  fc_features,
                                  mu_c_dict_train,
                                  trainloader,
                                  isTrain=True)
        # Sigma_W_test_norm = compute_Sigma_W(args, model, fc_features, mu_c_dict_train, testloader, isTrain=False)
        Sigma_B = compute_Sigma_B(mu_c_dict_train, mu_G_train)

        collapse_metric = np.trace(
            Sigma_W @ scilin.pinv(Sigma_B)) / len(mu_c_dict_train)
        ETF_metric = compute_ETF(W)
        WH_relation_metric, H = compute_W_H_relation(W, mu_c_dict_train,
                                                     mu_G_train)
        if args.bias:
            Wh_b_relation_metric = compute_Wh_b_relation(W, mu_G_train, b)
        else:
            Wh_b_relation_metric = compute_Wh_b_relation(
                W, mu_G_train, torch.zeros((W.shape[0], )))

        info_dict['collapse_metric'].append(collapse_metric)
        info_dict['ETF_metric'].append(ETF_metric)
        info_dict['WH_relation_metric'].append(WH_relation_metric)
        info_dict['Wh_b_relation_metric'].append(Wh_b_relation_metric)

        info_dict['W'].append((W.detach().cpu().numpy()))
        if args.bias:
            info_dict['b'].append(b.detach().cpu().numpy())
        info_dict['H'].append(H.detach().cpu().numpy())

        info_dict['mu_G_train'].append(mu_G_train.detach().cpu().numpy())
        # info_dict['mu_G_test'].append(mu_G_test.detach().cpu().numpy())

        info_dict['train_acc1'].append(train_acc1)
        info_dict['train_acc5'].append(train_acc5)
        info_dict['test_acc1'].append(test_acc1)
        info_dict['test_acc5'].append(test_acc5)

        print_and_save(
            '[epoch: %d] | train top1: %.4f | train top5: %.4f | test top1: %.4f | test top5: %.4f '
            % (i + 1, train_acc1, train_acc5, test_acc1, test_acc5), logfile)

    with open(args.load_path + 'info.pkl', 'wb') as f:
        pickle.dump(info_dict, f)
Example #13
def test_cnn(data_nm,
             train_size,
             cv=5,
             use_classes='all',
             composition=None,
             class_weight=None,
             model='simple_cnn',
             batch_size=32,
             epochs=100):
    balanced = composition is None
    dataset = make_dataset(data_nm,
                           train_size,
                           use_classes=use_classes,
                           balanced=balanced,
                           composition=composition)
    y = dataset['train_labels']
    y_test = dataset['test_labels']
    dataset = preprocess_dataset(dataset)
    n_classes = len(np.unique(y))

    # remember which architecture was requested; `model` is rebound to the
    # fitted network inside the cross-validation loop below
    model_name = model
    skf = StratifiedKFold(cv, shuffle=True, random_state=42)
    y_preds, models, histories = [], [], []
    for i, (train_idx,
            val_idx) in enumerate(skf.split(dataset['train_images'], y), 1):
        X_train = dataset['train_images'][train_idx]
        y_train = dataset['train_labels'][train_idx]
        X_val = dataset['train_images'][val_idx]
        y_val = dataset['train_labels'][val_idx]
        if model_name == 'resnet50':
            model = build_resnet50(X_train[0].shape, n_classes)
        else:
            model = build_simple_cnn(X_train[0].shape, n_classes)
        model.compile(SGD(1e-3, momentum=0.9),
                      loss='categorical_crossentropy',
                      metrics=['acc'])
        hist = model.fit(X_train,
                         y_train,
                         validation_data=[X_val, y_val],
                         batch_size=batch_size,
                         epochs=epochs,
                         verbose=0,
                         class_weight=class_weight)
        val_loss, val_acc = model.evaluate(X_val, y_val, verbose=0)
        print(f'fold{i} loss:{val_loss} acc:{val_acc}')
        y_pred = np.argmax(model.predict(X_val), 1)
        y_pred = pd.Series(y_pred, index=val_idx)
        y_preds.append(y_pred)
        models.append(model)
        histories.append(hist.history)
    y_preds = pd.concat(y_preds).sort_index().values
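    # ensemble over folds: sum the per-fold class probabilities on the test set, then argmax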
    y_test_preds = np.array(
        [model.predict(dataset['test_images']) for model in models])
    y_test_preds = y_test_preds.sum(0).argmax(1)
    train_acc = accuracy_score(y, y_preds)
    test_acc = accuracy_score(y_test, y_test_preds)
    print(f'train acc:{train_acc} test acc:{test_acc}')
    history_ = {
        k: np.mean([hist[k] for hist in histories], 0)
        for k in histories[0].keys()
    }
    plot_history(history_)
    return train_acc, test_acc