Esempio n. 1
0
def main():
    """Extract features with a pretrained LResNet50E_IR_Sia model.

    Builds the model, prepares a DataLoader over the training list
    (grayscale or RGB pipeline depending on ``args.is_gray``) and delegates
    the actual feature extraction to ``extract``.
    """
    # CUDA_VISIBLE_DEVICES only takes effect if exported before the CUDA
    # context is initialised, so set it before anything touches the GPU.
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpus

    # --------------------------------------model----------------------------------------
    model = net_sia.LResNet50E_IR_Sia(is_gray=args.is_gray)
    model = model.to(device)

    args.run_name = utils.get_run_name()  # e.g. Dec25-14-53-43_lingxuesong-PC0
    output_dir = os.path.join(args.save_path, args.network)
    # exist_ok avoids the check-then-create race of exists() + makedirs().
    os.makedirs(output_dir, exist_ok=True)
    # ------------------------------------load image---------------------------------------
    if args.is_gray:
        train_transform = transforms.Compose([
            transforms.Grayscale(),
            transforms.ToTensor(),  # range [0, 255] -> [0.0,1.0]
            transforms.Normalize(mean=(0.5, ), std=(0.5, ))
        ])  # gray
    else:
        train_transform = transforms.Compose([
            transforms.ToTensor(),  # range [0, 255] -> [0.0,1.0]
            transforms.Normalize(mean=(0.5, 0.5, 0.5),
                                 std=(0.5, 0.5,
                                      0.5))  # range [0.0, 1.0] -> [-1.0,1.0]
        ])
    # shuffle=False keeps extracted features aligned with train_list order.
    train_loader = torch.utils.data.DataLoader(dset.ImageList(
        root=args.root_path,
        fileList=args.train_list,
        transform=train_transform),
                                               batch_size=args.batch_size,
                                               shuffle=False,
                                               num_workers=args.workers,
                                               pin_memory=True,
                                               drop_last=True)

    print('length of train Database: ' + str(len(train_loader.dataset)) +
          ' Batches: ' + str(len(train_loader)))
    # ----------------------------------------train----------------------------------------
    extract(train_loader, model, args.weight_model, output_dir)
    print('Finished Extracting')
Esempio n. 2
0
def main():
    """Fine-tune the siamese LResNet50E-IR model with a margin classifier.

    Loads pretrained backbone weights, freezes the backbone layers, trains
    the remaining ('sia') parameters plus the classifier, checkpoints every
    epoch and validates on the held-out list.
    """
    # --------------------------------------model----------------------------------------
    model = net_sia.LResNet50E_IR_Sia(is_gray=args.is_gray)
    model_eval = net_sia.LResNet50E_IR_Sia(is_gray=args.is_gray)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpus
    # 512 is dimension of feature
    classifier = {
        'MCP': layer.MarginCosineProduct(512, args.num_class),
        'AL': layer.AngleLinear(512, args.num_class),
        'L': torch.nn.Linear(512, args.num_class, bias=False)
    }[args.classifier_type]

    classifier.load_state_dict(torch.load(args.weight_fc))

    print(os.environ['CUDA_VISIBLE_DEVICES'], args.cuda)

    # Copy every pretrained tensor whose key matches into the live state
    # dict in place (copy_ mutates model's parameters directly).
    pretrained = torch.load(args.weight_model)
    pretrained_dict = pretrained['model_state_dict']
    model_dict = model.state_dict()
    for k, v in pretrained_dict.items():
        if k in model_dict:
            model_dict[k].copy_(v)

    del pretrained
    del pretrained_dict
    if args.resume:
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['model_state_dict'])
        # classifier ckpt only save model info
        classifier.load_state_dict(torch.load(args.resume_fc))
    print(model)
    model = torch.nn.DataParallel(model).to(device)
    model_eval = model_eval.to(device)
    classifier = classifier.to(device)

    args.run_name = utils.get_run_name()
    output_dir = os.path.join(args.save_path, args.run_name.split("_")[0])
    # exist_ok avoids the check-then-create race of exists() + makedirs().
    os.makedirs(output_dir, exist_ok=True)
    # ------------------------------------load image---------------------------------------
    if args.is_gray:
        train_transform = transforms.Compose([
            transforms.Grayscale(),
            transforms.ToTensor(),  # range [0, 255] -> [0.0,1.0]
            transforms.Normalize(mean=(0.5, ), std=(0.5, ))
        ])  # gray
        # BUG FIX: valid_transform used to be defined only in the RGB
        # branch, so building val_loader raised NameError when
        # args.is_gray was set; mirror the grayscale pipeline here.
        valid_transform = transforms.Compose([
            transforms.Grayscale(),
            transforms.ToTensor(),  # range [0, 255] -> [0.0,1.0]
            transforms.Normalize(mean=(0.5, ), std=(0.5, ))
        ])
    else:
        train_transform = transforms.Compose([
            transforms.ToTensor(),  # range [0, 255] -> [0.0,1.0]
            transforms.Normalize(mean=(0.5, 0.5, 0.5),
                                 std=(0.5, 0.5,
                                      0.5))  # range [0.0, 1.0] -> [-1.0,1.0]
        ])
        valid_transform = transforms.Compose([
            transforms.ToTensor(),  # range [0, 255] -> [0.0,1.0]
            transforms.Normalize(mean=(0.5, 0.5, 0.5),
                                 std=(0.5, 0.5,
                                      0.5))  # range [0.0, 1.0] -> [-1.0,1.0]
        ])
    train_loader = torch.utils.data.DataLoader(dset.ImageList(
        root=args.root_path,
        fileList=args.train_list,
        transform=train_transform),
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=False,
                                               drop_last=True)
    val_loader = torch.utils.data.DataLoader(dset.ImageList(
        root=args.root_path,
        fileList=args.valid_list,
        transform=valid_transform),
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=False,
                                             drop_last=False)

    print('length of train Database: ' + str(len(train_loader.dataset)) +
          ' Batches: ' + str(len(train_loader)))
    print('length of valid Database: ' + str(len(val_loader.dataset)) +
          ' Batches: ' + str(len(val_loader)))
    print('Number of Identities: ' + str(args.num_class))
    #---------------------------------------params setting-----------------------------------
    # Freeze the backbone (residual layers, stem conv/bn/prelu); everything
    # else stays trainable.
    for name, param in model.named_parameters():
        if 'layer' in name or 'conv1' in name or 'bn1' in name or 'prelu1' in name:
            param.requires_grad = False
        else:
            param.requires_grad = True

    print("Params to learn:")
    # 'sia' params get the configured learning rate; the remaining trainable
    # params are placed in a zero-lr group (they still receive gradients).
    params_to_update = []
    params_to_stay = []
    for name, param in model.named_parameters():
        if param.requires_grad:
            if 'sia' in name:
                params_to_update.append(param)
                print("Update \t", name)
            else:
                params_to_stay.append(param)
                print("Stay \t", name)

    for name, param in classifier.named_parameters():
        param.requires_grad = True
        params_to_stay.append(param)
        print("Stay \t", name)
    #--------------------------------loss function and optimizer-----------------------------
    cfg = configurations[args.config]
    criterion = torch.nn.CrossEntropyLoss().to(device)
    criterion2 = torch.nn.L1Loss(reduction='mean').to(device)
    # First param group is frozen in effect (lr=0); the second uses cfg['lr'].
    optimizer = torch.optim.SGD([{
        'params': params_to_stay,
        'lr': 0,
        'weight_decay': 0,
        'momentum': 0
    }, {
        'params': params_to_update
    }],
                                lr=cfg['lr'],
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    start_epoch = 1
    if args.resume:
        optimizer.load_state_dict(checkpoint['optim_state_dict'])
        start_epoch = checkpoint['epoch']
        del checkpoint
    # ----------------------------------------train----------------------------------------
    save_ckpt(model, 0, optimizer, output_dir +
              '/CosFace_0_checkpoint.pth')  # Not resumed, pretrained~
    for epoch in range(start_epoch, cfg['epochs'] + 1):
        train(train_loader, model, classifier, criterion, criterion2,
              optimizer, epoch, cfg['step_size'], cfg['lr'])
        save_ckpt(model, epoch, optimizer,
                  output_dir + '/CosFace_' + str(epoch) + '_checkpoint.pth')
        print('Validating on valid set...')
        # model_eval reloads the freshly-saved checkpoint inside valid().
        valid(val_loader, model_eval,
              output_dir + '/CosFace_' + str(epoch) + '_checkpoint.pth',
              classifier, criterion, criterion2)
    print('Finished Training')
Esempio n. 3
0
parser.add_argument('--patience', type=int, default=5)
parser.add_argument('--grad_clip', type=float, default=1.0)
parser.add_argument('--seed', type=int, default=None)
parser.add_argument('--save_model', action='store_true')
args = parser.parse_args()

# Fail fast on unsupported configuration values.
assert args.model in ['bow', 'lstm', 'gru', 'cnn', 'transformer']
assert args.pool_mode in ['mean', 'max', 'weighted_mean']
assert args.loss in ['softmax', 'cosine']

# PEP 8: compare against None with `is`, not `==`.
if args.seed is None:
    args.seed = random.randint(0, 999)

args = utils.handle_args(args)

run_name = utils.get_run_name(args)

run_path = os.path.join('runs/', *run_name)

# Refuse to clobber an existing run directory.
assert not os.path.exists(run_path)

os.makedirs(run_path)

params_path = os.path.join(run_path, 'params.txt')
results_path = os.path.join(run_path, 'results.txt')

# Persist the full hyper-parameter set for reproducibility.
with open(params_path, 'w+') as f:
    for param, val in vars(args).items():
        f.write(f'{param}\t{val}\n')
with open(results_path, 'w+') as f:
Esempio n. 4
0
        os.path.join(args.bold5000_folder, 'stimuli', s) for s in stimuli
    ]
    # Extract model activations for every stimulus image.
    features = condition_features(stimuli, feat_extractor)

    # 5-fold cross-validation of the L2-penalised regression from features
    # to voxel principal components; fixed random_state keeps folds
    # reproducible. r is presumably a correlation score — confirm against
    # grad_regression's definition.
    cv_r = []
    cv = KFold(n_splits=5, shuffle=True, random_state=27)
    for train_idx, val_idx in cv.split(features):
        features_train, features_val = features[train_idx], features[val_idx]
        voxel_pcs_train, voxel_pcs_val = voxel_pcs[train_idx], voxel_pcs[
            val_idx]
        _, r = grad_regression(torch.from_numpy(features_train),
                               torch.from_numpy(voxel_pcs_train),
                               torch.from_numpy(features_val),
                               torch.from_numpy(voxel_pcs_val),
                               l2_penalty=args.l2)
        cv_r.append(r)
    print('\nFinal Mean r: {:.4f}'.format(np.mean(cv_r)))

    # Refit on the full dataset; only the first n_pcs columns of the learned
    # weights are kept for the regressor.
    w, _ = grad_regression(torch.from_numpy(features),
                           torch.from_numpy(voxel_pcs),
                           l2_penalty=args.l2)
    regressor = RegressionModel(features.shape[1], voxel_pcs.shape[1])
    regressor.set_params(w[:, :args.n_pcs])

    # Wrap feature extractor + regressor and serialise in inference mode.
    encoder = Encoder(feat_extractor, regressor)
    encoder.eval()
    run_name = utils.get_run_name('bold5000', args.feature_extractor,
                                  args.feature_name, [args.roi + 'pcs'],
                                  args.subj)
    torch.save(encoder, os.path.join('saved_models', run_name + '.pth'))
Esempio n. 5
0
    np.mean(pca.explained_variance_ratio_.mean())))
# Wrap the fitted PCA in an encoder around the feature extractor.
pca_encoder = PCAEncoder(feat_extractor, pcs=pca.components_, mean=pca.mean_)

cca = CCA(n_components=n_components, scale=False)

# 5-fold cross-validation: fit CCA on the training fold, then record the
# canonical-score correlations on both folds.
cv = KFold(n_splits=5, shuffle=True, random_state=27)
cv_train_r, cv_val_r = [], []
for idx_tr, idx_va in cv.split(pcs):
    cca.fit(voxels[idx_tr], pcs[idx_tr])
    tr_x, tr_y = cca.transform(voxels[idx_tr], pcs[idx_tr])
    va_x, va_y = cca.transform(voxels[idx_va], pcs[idx_va])
    cv_train_r.append(correlation(tr_x, tr_y))
    cv_val_r.append(correlation(va_x, va_y))

# Average the per-fold correlation vectors component-wise.
cv_train_r = np.stack(cv_train_r).mean(axis=0)
cv_val_r = np.stack(cv_val_r).mean(axis=0)
print('Cross-validated score correlations\n'
      'Train: Mean={:.3g} Max={:.3g} Min={:.3g}\n'
      'Val: Mean={:.3g} Max={:.3g} Min={:.3g}\n'.format(
          cv_train_r.mean(), cv_train_r.max(), cv_train_r.min(),
          cv_val_r.mean(), cv_val_r.max(), cv_val_r.min()))

# Final fit on the full data; the encoder only needs the y-rotations.
x_scores, y_scores = cca.fit_transform(voxels, pcs)
pca_encoder.cpu()
cca_encoder = CCAEncoder(pca_encoder, cca.y_rotations_.astype(np.float32))
save_name = utils.get_run_name('bold5000', 'alexnet', 'conv_3',
                               ['CCA-{}'.format(roi)])
torch.save(cca_encoder, os.path.join('saved_models', save_name + '.pth'))
Esempio n. 6
0
def main(name, input_path, train_path, test_path, param_path, gpu,
         num_classes):
    """Train a model while logging to a per-run file and TensorBoard dir.

    Creates a uniquely-named log file, trains via ``train``, saves learning
    curves and pickled history, then renames the log to embed the final
    metrics.

    Returns:
        tuple: (res, run_name) where ``res`` holds the final metrics
        (res[0] is logged as loss, res[1] as accuracy in the file name).
    """
    logger.setLevel(logging.INFO)
    if gpu:
        # Grow GPU memory on demand instead of grabbing it all up front.
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        sess = tf.Session(config=config)
        K.set_session(sess)
    run_name, uid = utils.get_run_name(name)

    # The log file is renamed after training; compute its path once so the
    # FileHandler and the final shutil.move cannot disagree.
    log_path = path.join(input_path, './logs/{}.log'.format(run_name))

    # create a file handler
    handler = logging.FileHandler(log_path)
    handler.setLevel(logging.INFO)

    # create a logging format
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    handler.setFormatter(formatter)

    # add the handlers to the logger
    logger.addHandler(handler)

    logger.info('Start Training!')
    logger.info(run_name)
    logger.info('run arguments')
    logger.info(param_path)
    logger.info("train dataset:")
    logger.info(train_path)
    logger.info("test dataset:")
    logger.info(test_path)
    my_data = utils_tf.load_data(train_path, test_path)

    best_folder = path.join(input_path, 'best_models', uid)
    mkdir(best_folder)
    logger.info("RESULTS FOLDER:")
    logger.info(best_folder)
    graph_dir = path.join(input_path, 'graph', uid)
    mkdir(graph_dir)
    logger.info("TENSORBOARD MONITORING:")
    logger.info(graph_dir)

    history_ret, model_trained, res = train(my_data, best_folder, run_name,
                                            graph_dir, param_path, num_classes)

    # Persist learning curves and the raw history for later inspection.
    loss_fn = path.join(best_folder, "loss_{}.png".format(run_name))
    acc_fn = path.join(best_folder, "accuracy_{}.png".format(run_name))
    utils_tf.plot_history(history_ret, loss_fn, acc_fn)

    utils.to_pickle(
        history_ret.history,
        path.join(best_folder, '{}_history.pickle'.format(run_name)))

    # Detach the handler before renaming the file it holds open (required
    # on Windows; good hygiene everywhere).
    handler.flush()
    handler.close()
    logger.removeHandler(handler)
    # Embed the final loss/accuracy values in the log file's name.
    shutil.move(
        log_path,
        path.join(
            input_path,
            './logs/{}_l_{}_acc_{}.log'.format(run_name,
                                               np.around(res[0], decimals=4),
                                               np.around(res[1], decimals=4))))

    return res, run_name
Esempio n. 7
0
    return condition_features


if __name__ == '__main__':
    parser = ArgumentParser(description='PCA encoder using BOLD5000 study data')
    parser.add_argument('--bold5000_folder', required=True, type=str, help='folder containing the stimuli images')
    parser.add_argument('--feature_extractor', default='alexnet', type=str, help='feature extraction model')
    parser.add_argument('--feature_name', default='conv_3', type=str, help='feature extraction layer')
    parser.add_argument('--n_pcs', default=400, type=int, help='number of pcs to fit')
    args = parser.parse_args()

    # Select the backbone; fail loudly on anything unimplemented.
    if args.feature_extractor == 'alexnet':
        feat_extractor = AlexNet(args.feature_name)
    elif args.feature_extractor == 'vgg16':
        feat_extractor = VGG16(args.feature_name)
    else:
        raise ValueError('unimplemented feature extractor: {}'.format(args.feature_extractor))
    if torch.cuda.is_available():
        feat_extractor.cuda()

    features = condition_features(os.path.join(args.bold5000_folder, 'stimuli'), feat_extractor)

    # Fit PCA on the extracted features and report explained variance.
    pca = PCA(n_components=args.n_pcs)
    pca.fit(features)
    print('\nMean Explained Variance: {:.4f}'.format(np.mean(pca.explained_variance_ratio_.mean())))

    encoder = PCAEncoder(feat_extractor, pcs=pca.components_, mean=pca.mean_)
    encoder.eval()  # inference mode before serialisation
    run_name = utils.get_run_name('bold5000', args.feature_extractor, args.feature_name, ['PCA'])
    # torch.save does not create parent directories; make sure the output
    # folder exists so the save cannot fail on a fresh checkout.
    os.makedirs('saved_models', exist_ok=True)
    torch.save(encoder, os.path.join('saved_models', run_name + '.pth'))