Example #1
File: main.py  Project: songshucode/dmcp
def main():
    args = tools.get_args(parser)
    config = tools.get_config(args)
    tools.init(config)
    tb_logger, logger = tools.get_logger(config)
    tools.check_dist_init(config, logger)

    checkpoint = tools.get_checkpoint(config)
    runner = tools.get_model(config, checkpoint)
    loaders = tools.get_data_loader(config)

    if dist.is_master():
        logger.info(config)

    if args.mode == 'train':
        train(config, runner, loaders, checkpoint, tb_logger)
    elif args.mode == 'evaluate':
        evaluate(runner, loaders)
    elif args.mode == 'calc_flops':
        if dist.is_master():
            flops = tools.get_model_flops(config, runner.get_model())
            logger.info('flops: {}'.format(flops))
    elif args.mode == 'calc_params':
        if dist.is_master():
            params = tools.get_model_parameters(runner.get_model())
            logger.info('params: {}'.format(params))
    else:
        assert checkpoint is not None
        from models.dmcp.utils import sample_model
        sample_model(config, runner.get_model())

    if dist.is_master():
        logger.info('Done')
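The dispatch above branches on args.mode; the project's own argument parsing lives in tools.get_args and is not shown here. Below is a minimal, hypothetical argparse sketch of such a mode switch (the flag names and the 'sample' choice for the fallback branch are assumptions, not DMCP's actual CLI):

import argparse

def get_args_sketch():
    # Hypothetical parser mirroring the modes handled in main() above.
    parser = argparse.ArgumentParser(description='DMCP entry point (sketch)')
    parser.add_argument('--mode', default='train',
                        choices=['train', 'evaluate', 'calc_flops',
                                 'calc_params', 'sample'],
                        help='which branch of main() to run')
    parser.add_argument('--config', required=True,
                        help='path to the experiment config file')
    return parser.parse_args()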
Example #2
def main():
    check_folder()
    args = get_args()

    # Can work with any model, but it assumes that the model has a
    # feature method and a classifier method; here a torchvision
    # ResNet-50 is used with its final fc replaced by a 2-class head.

    model = models.resnet50(pretrained=True).to(device)
    model.fc = nn.Sequential(
        nn.Linear(2048, 2),
    ).to(device)

    model.load_state_dict(torch.load(
        './models/resnet50-epoch6-Acc9715.h5'))

    grad_cam = GradCam(
        model=model, 
        feature_module=model.layer4,
        target_layer_names=['2'], 
        use_cuda=args.use_cuda
    )

    img = cv2.imread(args.image_path, 1)
    img = np.float32(cv2.resize(img, (224, 224))) / 255
    input_tensor = preprocess_image(img)

    # If None, returns the map for the highest scoring category.
    # Otherwise, targets the requested index.
    target_index = None
    mask = grad_cam(input_tensor, target_index)

    out_path_cam = './out/{}_cam.jpg'.format(args.out_prefix)
    show_cam_on_image(img, mask, out_path_cam)

    gb_model = GuidedBackpropReLUModel(model=model, use_cuda=args.use_cuda)
    # print(model._modules.items())
    gb = gb_model(input_tensor, index=target_index)
    gb = gb.transpose((1, 2, 0))
    cam_mask = cv2.merge([mask, mask, mask])
    cam_gb = deprocess_image(cam_mask*gb)
    gb = deprocess_image(gb)

    cv2.imwrite('./out/{}_gb.jpg'.format(args.out_prefix), gb)
    cv2.imwrite('./out/{}_cam_gb.jpg'.format(args.out_prefix), cam_gb)
    os.system('cp {} ./out/{}_orig.jpg'.format(args.image_path, args.out_prefix))
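show_cam_on_image above writes the class-activation overlay to out_path_cam. A typical implementation of that helper (a generic sketch, not necessarily the one used in this project) blends a JET-colormapped heatmap with the normalized input image:

import cv2
import numpy as np

def show_cam_on_image_sketch(img, mask, out_path):
    # img: float32 BGR image in [0, 1]; mask: float32 CAM in [0, 1].
    heatmap = cv2.applyColorMap(np.uint8(255 * mask), cv2.COLORMAP_JET)
    heatmap = np.float32(heatmap) / 255
    cam = heatmap + np.float32(img)
    cam = cam / np.max(cam)
    cv2.imwrite(out_path, np.uint8(255 * cam))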
Example #3
def main(args):
    triplet_save_path = p.join(args.path.train_data_dir, "triplet_dataset")

    args.model.tokenizer_name = "xlm-roberta-large"
    args.model.retriever_name = "BM25"

    bm25 = BM25Retrieval(args)
    bm25.get_embedding()

    train_dataset = get_train_dataset(args)
    triplet_datasets = make_triplet_dataset(bm25, train_dataset)

    df = pd.DataFrame(triplet_datasets)

    f = Features({
        "question": Value(dtype="string", id=None),
        "context": Value(dtype="string", id=None),
        "negative": Value(dtype="string", id=None),
    })

    triplet_datasets = Dataset.from_pandas(df, features=f)
    triplet_datasets.save_to_disk(triplet_save_path)


if __name__ == "__main__":
    args = get_args()
    main(args)
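The dataset written by save_to_disk can be read back with Hugging Face datasets.load_from_disk; the path below is a placeholder for wherever triplet_save_path pointed:

from datasets import load_from_disk

triplet_datasets = load_from_disk('path/to/triplet_dataset')
print(triplet_datasets.column_names)  # ['question', 'context', 'negative']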