Example #1
0
            # Append the second (higher-resolution) stage image below the
            # first, separated by mid_padding — presumably a black spacer row
            # defined just above this fragment (not visible here; TODO confirm).
            superimage =\
                np.concatenate([superimage, mid_padding, superimage2], axis=0)

        # Reserve a 128-pixel-tall black band at the top; drawCaption below
        # appears to render the sentence text into this area — confirm.
        top_padding = np.zeros((128, superimage.shape[1], 3))
        superimage =\
            np.concatenate([top_padding, superimage], axis=0)

        # One composite JPEG per sentence, e.g. "<save_dir>/sentence7.jpg".
        fullpath = '%s/sentence%d.jpg' % (save_dir, startID + j)
        superimage = drawCaption(np.uint8(superimage), captions_batch[j])
        # NOTE(review): scipy.misc.imsave was deprecated in SciPy 1.0 and
        # removed in 1.2 — confirm the pinned SciPy version or migrate to
        # imageio.imwrite.
        scipy.misc.imsave(fullpath, superimage)


if __name__ == "__main__":
    # Script entry point: parse CLI flags and let them override the yaml config.
    args = parse_args()
    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    # -1 appears to mean "use the config's default GPU" — confirm in parse_args.
    if args.gpu_id != -1:
        cfg.GPU_ID = args.gpu_id
    if args.caption_path is not None:
        cfg.TEST.CAPTION_PATH = args.caption_path

    # Load text embeddings generated from the encoder.
    # The .t7 file is a Lua/Torch archive read via torchfile; it carries the
    # raw sentences (raw_txt) alongside per-sentence feature tensors (fea_txt).
    cap_path = cfg.TEST.CAPTION_PATH
    t_file = torchfile.load(cap_path)
    captions_list = t_file.raw_txt
    # Stack the per-sentence feature arrays into one (num_sentences, dim) matrix.
    embeddings = np.concatenate(t_file.fea_txt, axis=0)
    num_embeddings = len(captions_list)
    print('Successfully load sentences from: ', cap_path)
    print('Total number of sentences:', num_embeddings)
    print('num_embeddings:', num_embeddings, embeddings.shape)
    # path to save generated samples
Example #2
0
            # Stack the second stage's image below the first with mid_padding
            # in between — mid_padding is defined above this fragment (not
            # visible here; presumably a black spacer row — TODO confirm).
            superimage =\
                np.concatenate([superimage, mid_padding, superimage2], axis=0)

        # Add a 128-pixel black header strip; drawCaption below appears to
        # write the caption text into it — confirm against drawCaption.
        top_padding = np.zeros((128, superimage.shape[1], 3))
        superimage =\
            np.concatenate([top_padding, superimage], axis=0)

        # Persist the captioned composite as "<save_dir>/sentence<N>.jpg".
        fullpath = '%s/sentence%d.jpg' % (save_dir, startID + j)
        superimage = drawCaption(np.uint8(superimage), captions_batch[j])
        # NOTE(review): scipy.misc.imsave was removed in SciPy 1.2 — verify
        # the environment pins an older SciPy, or switch to imageio.imwrite.
        scipy.misc.imsave(fullpath, superimage)


if __name__ == "__main__":
    # Entry point: CLI arguments selectively override the loaded config.
    args = parse_args()
    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    # gpu_id == -1 seems to mean "keep the config default" — confirm.
    if args.gpu_id != -1:
        cfg.GPU_ID = args.gpu_id
    if args.caption_path is not None:
        cfg.TEST.CAPTION_PATH = args.caption_path

    # Load text embeddings generated from the encoder.
    # torchfile reads the Lua/Torch .t7 archive: raw_txt holds the sentence
    # strings, fea_txt the per-sentence embedding arrays.
    cap_path = cfg.TEST.CAPTION_PATH
    t_file = torchfile.load(cap_path)
    captions_list = t_file.raw_txt
    # Concatenate per-sentence features into a single (num_sentences, dim) array.
    embeddings = np.concatenate(t_file.fea_txt, axis=0)
    num_embeddings = len(captions_list)
    print('Successfully load sentences from: ', cap_path)
    print('Total number of sentences:', num_embeddings)
    print('num_embeddings:', num_embeddings, embeddings.shape)
    # path to save generated samples
Example #3
0
    # Training-length and batching knobs (defaults: 600 epochs, batch of 64).
    parser.add_argument('--epoch', dest='epoch', default=600, type=int)
    parser.add_argument('--batch_size',
                        dest='batch_size',
                        default=64,
                        type=int)
    # Root directory of the dataset; no default, so callers below must supply it.
    parser.add_argument('--dataset_dir', dest='dataset_dir', type=str)
    # NOTE(review): dead code — previously printed help and exited when the
    # script was run with no arguments; left disabled by the original author.
    # if len(sys.argv) == 1:
    #    parser.print_help()
    #    sys.exit(1)
    args = parser.parse_args()
    return args


if __name__ == "__main__":
    # Stage-I training entry point: load the flowers config, then apply CLI
    # overrides for epoch count and batch size.
    args = parse_args()
    cfg_from_file("stageI/cfg/flowers.yml")

    cfg.TRAIN.MAX_EPOCH = args.epoch
    cfg.TRAIN.BATCH_SIZE = args.batch_size

    # Echo the fully-resolved config for reproducibility of runs.
    print('Using config:')
    pprint.pprint(cfg)

    # NOTE(review): disabled timestamping of output dirs, kept by the author.
    ## now = datetime.datetime.now(dateutil.tz.tzlocal())
    ## timestamp = now.strftime('%Y_%m_%d_%H_%M_%S')

    # Build the text-conditioned dataset; the trailing 1 is an undocumented
    # positional argument of TextDataset — check its signature before changing.
    datadir = args.dataset_dir
    dataset = TextDataset(datadir, cfg.EMBEDDING_TYPE, 1)
    # Test split lives under "<datadir>/test".
    filename_test = '%s/test' % (datadir)
    dataset.test = dataset.get_data(filename_test)
    if cfg.TRAIN.FLAG:
Example #4
0
File: demo.py — Project: pengdake/StackGAN
            # 64-pixel black divider between the two stages' images.
            mid_padding = np.zeros((64, superimage.shape[1], 3))
            # Stack stage-II output (superimage2) below stage-I (superimage).
            superimage =\
                np.concatenate([superimage, mid_padding, superimage2], axis=0)

        # 128-pixel black header band; drawCaption below appears to render the
        # sentence text into this area — confirm against drawCaption.
        top_padding = np.zeros((128, superimage.shape[1], 3))
        superimage =\
            np.concatenate([top_padding, superimage], axis=0)

        # Write one captioned composite per sentence: "<save_dir>/sentence<N>.jpg".
        fullpath = '%s/sentence%d.jpg' % (save_dir, startID + j)
        superimage = drawCaption(np.uint8(superimage), captions_batch[j])
        # NOTE(review): scipy.misc.imsave was removed in SciPy 1.2 — confirm
        # the pinned SciPy version, or migrate to imageio.imwrite.
        scipy.misc.imsave(fullpath, superimage)


if __name__ == "__main__":
    # Demo entry point: fixed flowers-demo config, GPU 0, with the caption
    # file selected by a per-user/session uid from the CLI.
    args = parse_args()
    cfg_from_file("demo/cfg/flowers-demo.yml")
    cfg.GPU_ID = 0
    uid = args.uid
    cfg.TEST.CAPTION_PATH = "Data/flowers/example_captions_%s.t7" % uid
    cfg.TEST.PRETRAINED_MODEL = args.model_path

    # Load text embeddings generated from the encoder.
    # torchfile parses the Lua/Torch .t7 archive: raw_txt is the list of
    # sentence strings, fea_txt their per-sentence embedding arrays.
    cap_path = cfg.TEST.CAPTION_PATH
    t_file = torchfile.load(cap_path)
    captions_list = t_file.raw_txt
    # Merge per-sentence features into one (num_sentences, dim) matrix.
    embeddings = np.concatenate(t_file.fea_txt, axis=0)
    num_embeddings = len(captions_list)
    print('Successfully load sentences from: ', cap_path)
    print('Total number of sentences:', num_embeddings)
    print('num_embeddings:', num_embeddings, embeddings.shape)
    # path to save generated samples