Example #1
def main(args):
    # Parse device ids
    default_dev, *parallel_dev = parse_devices(args.devices)
    all_devs = parallel_dev + [default_dev]
    all_devs = [x.replace('gpu', '') for x in all_devs]
    all_devs = [int(x) for x in all_devs]
    nr_devs = len(all_devs)

    with open(args.list_val, 'r') as f:
        lines = f.readlines()
        nr_files = len(lines)
        if args.num_val > 0:
            nr_files = min(nr_files, args.num_val)
    nr_files_per_dev = math.ceil(nr_files / nr_devs)

    pbar = tqdm(total=nr_files)

    acc_meter = AverageMeter()
    intersection_meter = AverageMeter()
    union_meter = AverageMeter()

    result_queue = Queue(500)
    procs = []
    for dev_id in range(nr_devs):
        start_idx = dev_id * nr_files_per_dev
        end_idx = min(start_idx + nr_files_per_dev, nr_files)
        proc = Process(target=worker,
                       args=(args, dev_id, start_idx, end_idx, result_queue))
        print('process:{}, start_idx:{}, end_idx:{}'.format(
            dev_id, start_idx, end_idx))
        proc.start()
        procs.append(proc)

    # master fetches results
    processed_counter = 0
    while processed_counter < nr_files:
        if result_queue.empty():
            continue
        (acc, pix, intersection, union) = result_queue.get()
        acc_meter.update(acc, pix)
        intersection_meter.update(intersection)
        union_meter.update(union)
        processed_counter += 1
        pbar.update(1)

    for p in procs:
        p.join()

    # summary
    iou = intersection_meter.sum / (union_meter.sum + 1e-10)
    for i, _iou in enumerate(iou):
        print('class [{}], IoU: {:.4f}'.format(i, _iou))

    print('[Eval Summary]:')
    print('Mean IoU: {:.4f}, Accuracy: {:.2f}%'.format(
        iou.mean(),
        acc_meter.average() * 100))

    print('Evaluation Done!')
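
All of the examples on this page call parse_devices() and then strip a 'gpu' prefix from each returned item before casting to int, so the helper evidently turns a device spec string into a list of 'gpuN' names. The implementation itself is not part of this listing; the following is a minimal sketch inferred from that usage (the comma-separated input format is an assumption, and the real helper may also accept ranges or CPU devices).

def parse_devices(input_devices):
    """Minimal sketch: split a spec such as 'gpu0,gpu1,gpu2' into
    a list of device names ['gpu0', 'gpu1', 'gpu2'].

    Inferred from how the callers use the result (they strip the
    'gpu' prefix and cast the remainder to int); not the original
    implementation.
    """
    return [d.strip() for d in input_devices.split(',') if d.strip()]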
Example #2
def main(args):
    # Parse device ids
    default_dev, *parallel_dev = parse_devices(args.devices)
    all_devs = parallel_dev + [default_dev]
    all_devs = [x.replace('gpu', '') for x in all_devs]
    all_devs = [int(x) for x in all_devs]
    nr_devs = len(all_devs)

    with open(args.list_val, 'r') as f:
        lines = f.readlines()
        nr_files = len(lines)
        if args.num_val > 0:
            nr_files = min(nr_files, args.num_val)
    nr_files_per_dev = math.ceil(nr_files / nr_devs)

    pbar = tqdm(total=nr_files)

    acc_meter = AverageMeter()
    intersection_meter = AverageMeter()
    union_meter = AverageMeter()

    result_queue = Queue(500)
    procs = []
    for dev_id in range(nr_devs):
        start_idx = dev_id * nr_files_per_dev
        end_idx = min(start_idx + nr_files_per_dev, nr_files)
        proc = Process(target=worker, args=(args, dev_id, start_idx, end_idx, result_queue))
        print('process:%d, start_idx:%d, end_idx:%d' % (dev_id, start_idx, end_idx))
        proc.start()
        procs.append(proc)

    # master fetches results
    processed_counter = 0
    while processed_counter < nr_files:
        if result_queue.empty():
            continue
        (acc, pix, intersection, union) = result_queue.get()
        acc_meter.update(acc, pix)
        intersection_meter.update(intersection)
        union_meter.update(union)
        processed_counter += 1
        pbar.update(1)

    for p in procs:
        p.join()

    iou = intersection_meter.sum / (union_meter.sum + 1e-10)
    for i, _iou in enumerate(iou):
        print('class [{}], IoU: {}'.format(i, _iou))

    print('[Eval Summary]:')
    print('Mean IoU: {:.4}, Accuracy: {:.2f}%'
          .format(iou.mean(), acc_meter.average()*100))

    print('Evaluation Done!')
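
Examples #1 and #2 rely on an AverageMeter that exposes update(value, weight), a .sum attribute, and an average() method; the accuracy meter is weighted by pixel count, while the intersection and union meters accumulate per-class arrays. The class is not shown in this listing; a minimal sketch consistent with that usage (names and defaults are assumptions) is:

class AverageMeter:
    """Sketch of the meter assumed by Examples #1 and #2: keeps a
    (weighted) running sum and count so that .sum and .average()
    work for both scalars and per-class NumPy arrays."""

    def __init__(self):
        self.sum = 0
        self.count = 0

    def update(self, value, weight=1):
        # 'value' may be a scalar (pixel accuracy) or an array
        # (per-class intersection / union); 'weight' is e.g. the
        # number of valid pixels for the accuracy meter.
        self.sum = self.sum + value * weight
        self.count += weight

    def average(self):
        return self.sum / (self.count + 1e-10)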
Example #3
def main(args):
    # Parse device ids
    default_dev, *parallel_dev = parse_devices(args.devices)
    all_devs = parallel_dev + [default_dev]
    all_devs = [int(x.replace('gpu', '')) for x in all_devs]
    nr_devs = len(all_devs)

    print("nr_dev: {}".format(nr_devs))

    nr_files = len(broden_dataset.record_list['validation_my_material'])
    if args.num_val > 0:
        nr_files = min(nr_files, args.num_val)
    nr_files_per_dev = math.ceil(nr_files / nr_devs)

    pbar = tqdm(total=nr_files)

    result_queue = Queue(5)
    procs = []
    for dev_id in range(nr_devs):
        start_idx = dev_id * nr_files_per_dev
        end_idx = min(start_idx + nr_files_per_dev, nr_files)
        proc = Process(target=worker,
                       args=(args, dev_id, start_idx, end_idx, result_queue))
        print('process:%d, start_idx:%d, end_idx:%d' %
              (dev_id, start_idx, end_idx))
        proc.start()
        procs.append(proc)

    # master fetches results
    all_result = []
    for i in range(nr_files):
        all_result.append(result_queue.get())
        pbar.update(1)

    for p in procs:
        p.join()

    benchmark = get_benchmark_result(all_result)

    print('[Eval Summary]:')
    print(benchmark)

    print('Evaluation Done!')
Example #4
def main(args):
    # Parse device ids
    default_dev, *parallel_dev = parse_devices(args.devices)
    all_devs = parallel_dev + [default_dev]
    all_devs = [int(x.replace('gpu', '')) for x in all_devs]
    nr_devs = len(all_devs)

    print("nr_dev: {}".format(nr_devs))

    nr_files = len(broden_dataset.record_list['validation'])
    if args.num_val > 0:
        nr_files = min(nr_files, args.num_val)
    nr_files_per_dev = math.ceil(nr_files / nr_devs)

    pbar = tqdm(total=nr_files)

    result_queue = Queue(500)
    procs = []
    for dev_id in range(nr_devs):
        start_idx = dev_id * nr_files_per_dev
        end_idx = min(start_idx + nr_files_per_dev, nr_files)
        proc = Process(target=worker, args=(args, dev_id, start_idx, end_idx, result_queue))
        print('process:%d, start_idx:%d, end_idx:%d' % (dev_id, start_idx, end_idx))
        proc.start()
        procs.append(proc)

    # master fetches results
    all_result = []
    for i in range(nr_files):
        all_result.append(result_queue.get())
        pbar.update(1)

    for p in procs:
        p.join()

    benchmark = get_benchmark_result(all_result)

    print('[Eval Summary]:')
    print(benchmark)

    print('Evaluation Done!')
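
Examples #1 through #4 all spawn worker(args, dev_id, start_idx, end_idx, result_queue) processes, but the worker itself is not part of this listing. Its contract can be inferred from the master loops: evaluate the files in [start_idx, end_idx) on the assigned device and put exactly one result per file on result_queue. The skeleton below is a hedged sketch of that contract only; the per-file evaluation is a placeholder, not the original code.

def worker(args, dev_id, start_idx, end_idx, result_queue):
    """Skeleton of the worker contract shared by Examples #1-#4:
    process files [start_idx, end_idx) on the device indexed by
    dev_id and put one result per file on result_queue, so the
    master's counting loop terminates."""
    import torch
    if torch.cuda.is_available():
        torch.cuda.set_device(dev_id)
    for idx in range(start_idx, end_idx):
        # Placeholder: a real worker runs the model on file `idx` and
        # returns (acc, pix, intersection, union) for Examples #1/#2,
        # or a per-image result consumed by get_benchmark_result()
        # in Examples #3/#4.
        result = (0.0, 0, 0, 0)
        result_queue.put(result)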
Example #5
def create_dist_worker():

    devices = parse_devices(Config.DIST_DEVICES)
    destroy_event = mp.Event()
    # batch_size = Config.MAST_BATCH_SIZE
    # worker_num = Config.MAST_WORKER_NUM
    thread = threading.Thread(target=run_redis_workers_forever,
                              args=(
                                  DISTModel,
                                  Config.DIST_BATCH_SIZE,
                                  0.1,
                                  Config.DIST_WORKER_NUM,
                                  devices,
                                  Config.REDIS_BROKER_URL,
                                  Config.DIST_CHANNEL,
                                  (),
                                  None,
                                  destroy_event,
                              ),
                              daemon=True)
    thread.start()
    return thread, destroy_event
Example #6
def create_mast_worker():
    root = Config.MAST_WORK_DIR
    cfg = MastConfig(os.path.join(root, 'configs/MAST_Configs.yml'))
    content_dir = Config.CONTENT_DIRECTORY
    style_dir = Config.STYLE_DIRECTORY
    stylized_dir = Config.STYLIZATION_DIRECTORY
    os.makedirs(content_dir, exist_ok=True)
    os.makedirs(style_dir, exist_ok=True)
    os.makedirs(stylized_dir, exist_ok=True)
    model_init_args = (
        root,
        cfg,
        content_dir,
        style_dir,
        stylized_dir,
    )
    destroy_event = mp.Event()
    devices = parse_devices(Config.MAST_DEVICES)
    # batch_size = Config.MAST_BATCH_SIZE
    # worker_num = Config.MAST_WORKER_NUM
    thread = threading.Thread(target=run_redis_workers_forever,
                              args=(
                                  MastModel,
                                  Config.MAST_BATCH_SIZE,
                                  0.1,
                                  Config.MAST_WORKER_NUM,
                                  devices,
                                  Config.REDIS_BROKER_URL,
                                  Config.MAST_CHANNEL,
                                  model_init_args,
                                  None,
                                  destroy_event,
                              ),
                              daemon=True)
    thread.start()
    return thread, destroy_event
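
Both factories in Examples #5 and #6 return the background thread together with its destroy_event, which suggests the intended shutdown path: set the event and join the thread. A hedged usage sketch, assuming the run_redis_workers_forever loop exits once destroy_event is set (the serve_forever entry point is hypothetical):

import time

def serve_forever():
    # Hypothetical entry point wiring the two worker factories together.
    mast_thread, mast_destroy = create_mast_worker()
    dist_thread, dist_destroy = create_dist_worker()
    try:
        while mast_thread.is_alive() or dist_thread.is_alive():
            time.sleep(1)   # keep the main thread alive for the daemon workers
    except KeyboardInterrupt:
        # Signal both worker loops to stop, then give them a moment to exit.
        mast_destroy.set()
        dist_destroy.set()
        mast_thread.join(timeout=5)
        dist_thread.join(timeout=5)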
Example #7
    if cfg.TRAIN.start_epoch > 0:
        cfg.MODEL.weights_enc_query = os.path.join(
            cfg.DIR, 'enc_query_epoch_{}.pth'.format(cfg.TRAIN.start_epoch))
        cfg.MODEL.weights_enc_memory = os.path.join(
            cfg.DIR, 'enc_memory_epoch_{}.pth'.format(cfg.TRAIN.start_epoch))
        cfg.MODEL.weights_att_query = os.path.join(
            cfg.DIR, 'att_query_epoch_{}.pth'.format(cfg.TRAIN.start_epoch))
        cfg.MODEL.weights_att_memory = os.path.join(
            cfg.DIR, 'att_memory_epoch_{}.pth'.format(cfg.TRAIN.start_epoch))
        cfg.MODEL.weights_decoder = os.path.join(
            cfg.DIR, 'decoder_epoch_{}.pth'.format(cfg.TRAIN.start_epoch))
        assert os.path.exists(cfg.MODEL.weights_enc_query) and os.path.exists(cfg.MODEL.weights_enc_memory) and \
            os.path.exists(cfg.MODEL.weights_att_query) and os.path.exists(cfg.MODEL.weights_att_memory) and \
            os.path.exists(cfg.MODEL.weights_decoder), "checkpoint does not exist!"

    # Parse gpu ids
    gpus = parse_devices(args.gpus)
    gpus = [x.replace('gpu', '') for x in gpus]
    gpus = [int(x) for x in gpus]
    num_gpus = len(gpus)
    cfg.TRAIN.batch_size = num_gpus * cfg.TRAIN.batch_size_per_gpu

    cfg.TRAIN.max_iters = cfg.TRAIN.epoch_iters * cfg.TRAIN.num_epoch
    cfg.TRAIN.running_lr_encoder = cfg.TRAIN.lr_encoder
    cfg.TRAIN.running_lr_decoder = cfg.TRAIN.lr_decoder

    random.seed(cfg.TRAIN.seed)
    torch.manual_seed(cfg.TRAIN.seed)

    main(cfg, gpus)
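
Example #7 is the tail of a training script's entry point; the argument parsing and config loading that produce args.gpus and cfg are not shown. A hedged sketch of what such a preamble might look like (the flag names, the config module, and the yacs-style merge_from_file call are assumptions, not the original script):

import argparse
# Assumption: the project exposes a yacs-style CfgNode as `cfg`.
from config import cfg

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Training entry point (sketch)')
    parser.add_argument('--cfg', required=True,
                        help='path to the experiment config file')
    parser.add_argument('--gpus', default='gpu0',
                        help='comma-separated device spec, e.g. "gpu0,gpu1"')
    args = parser.parse_args()

    cfg.merge_from_file(args.cfg)   # yacs: overlay the YAML file onto defaults
    # ... the fragment above (checkpoint checks, parse_devices, seeding,
    #     main(cfg, gpus)) would follow here.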