Example #1
def save_settings(application, exiting):
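    # Snapshot the module-level settings into a dict and persist them through the
    # cfgp config helper; when called on exit, also tear down the window and quit.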
    temp = {
        "home_dir": home_dir,
        "images": images,
        "image_index": image_index,
        "shuffle_counter": shuffle_counter,
        "num_monitors": num_monitors,
        "virtual_screensize": virtual_screensize,
        "mon_info": mon_info
    }
    cfgp.create_config(temp)
    if exiting:
        application.destroy()
        sys.exit(0)
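The function relies on module-level globals, a cfgp config helper, and sys, none of which are shown here. A minimal sketch of how it might be wired up as a window-close handler, assuming the application object is a Tkinter root window (the wiring below is an assumption, not code from the original project):

import tkinter as tk

application = tk.Tk()
# Persist settings and exit cleanly when the user closes the window.
application.protocol("WM_DELETE_WINDOW",
                     lambda: save_settings(application, exiting=True))
application.mainloop()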
Example #2
    args = parser.parse_args()

    configFilePath = args.config

    # gpu_list holds the local device ordinals the rest of the script addresses.
    use_gpu = args.gpu is not None
    gpu_list = []
    if use_gpu:
        # Restrict CUDA to the requested devices; they are then visible as 0..n-1.
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

        device_list = args.gpu.split(",")
        gpu_list = list(range(len(device_list)))

    os.system("clear")

    config = create_config(configFilePath)

    cuda = torch.cuda.is_available()
    logger.info("CUDA available: %s" % str(cuda))
    if not cuda and len(gpu_list) > 0:
        logger.error("CUDA is not available but specific GPU ids were given")
        raise NotImplementedError

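    # Initialize everything needed for evaluation from the config and the given checkpoint.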
    parameters = init_all(config, gpu_list, args.checkpoint, "test")

    with open(args.result, "w", encoding="utf8") as result_file:
        json.dump(test(parameters, config, gpu_list), result_file,
                  ensure_ascii=False, sort_keys=True, indent=2)
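This fragment assumes that parser, logger, create_config, init_all, and test are defined or imported earlier in the script. A minimal sketch of the surrounding scaffolding it relies on, with the argument names inferred from the fragment and everything else (defaults, help texts) assumed:

import argparse
import json
import logging
import os

import torch

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

parser = argparse.ArgumentParser()
parser.add_argument("--config", "-c", required=True, help="config file path")
parser.add_argument("--gpu", "-g", default=None, help="comma-separated gpu ids")
parser.add_argument("--checkpoint", default=None, help="checkpoint file path")
parser.add_argument("--result", required=True, help="result file path")
# create_config, init_all, and test come from the project itself and are not shown.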
Example #3
    parser.add_argument('--gpu', '-g', default="0", help="comma-separated gpu id list")
    parser.add_argument('--checkpoint',
                        default='model/bert/model0.bin',
                        help="checkpoint file path",
                        required=False)
    parser.add_argument('--data',
                        default='/data/',
                        help="data file path",
                        required=False)
    parser.add_argument('--result',
                        default='/output/result.txt',
                        help="result file path",
                        required=False)
    args = parser.parse_args()

    config = create_config(args.config)

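    # Point the config at the evaluation input directory and list the files in it.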
    config.set("data", "test_data_path", args.input)
    fs = ",".join(os.listdir(args.input))
    print(fs)
    config.set("data", "test_file_list", fs)

    gpu_list = []
    if args.gpu:
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
        device_list = args.gpu.split(",")
        # With CUDA_VISIBLE_DEVICES set above, the devices are addressed as 0..n-1.
        gpu_list = list(range(len(device_list)))

    parameters = init_all(config, gpu_list, args.checkpoint, "test")
    predict = test(parameters, config, gpu_list)
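Example #3 stops after computing predict. Mirroring how Example #2 serializes its output, and continuing with the args and predict objects from the fragment above, a plausible final step (an assumption, not code taken from this project) would be:

import json

# Write the predictions to the path given by --result as pretty-printed JSON.
with open(args.result, "w", encoding="utf8") as result_file:
    json.dump(predict, result_file, ensure_ascii=False, sort_keys=True, indent=2)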