Example #1
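
# Variant 1: the original entry point written against the legacy Inference
# Engine API (IECore); the .bin weights path is passed to each model explicitly.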
import logging as log
import sys

import cv2
from openvino.inference_engine import IECore

# The helper functions and model wrappers used below (build_argparser,
# CocosnetModel, SegmentationModel, get_files, get_mask_from_image,
# preprocess_semantics, preprocess_image, postprocess, save_result) are
# defined in the demo's own modules; their import paths are omitted here.


def main():
    log.basicConfig(format="[ %(levelname)s ] %(message)s",
                    level=log.INFO,
                    stream=sys.stdout)
    args = build_argparser().parse_args()

    log.info("Creating CoCosNet Model")
    ie_core = IECore()

    gan_model = CocosnetModel(ie_core, args.translation_model,
                              args.translation_model.replace(".xml", ".bin"),
                              args.device)
    seg_model = SegmentationModel(
        ie_core, args.segmentation_model,
        args.segmentation_model.replace(".xml", ".bin"),
        args.device) if args.segmentation_model else None

    log.info("Preparing input data")
    input_data = []
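    # Two input modes are supported: raw images plus a segmentation model that
    # produces semantic maps on the fly, or precomputed semantic maps supplied
    # directly; exactly one of the two must be chosen.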
    use_seg = bool(args.input_images) and bool(args.segmentation_model)
    use_semantics = bool(args.input_semantics) and bool(args.reference_semantics)
    assert use_seg ^ use_semantics, "Don't know where to get data"
    input_images = get_files(args.input_images)
    input_semantics = get_files(args.input_semantics)
    reference_images = get_files(args.reference_images)
    reference_semantics = get_files(args.reference_semantics)
    number_of_objects = len(reference_images)

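    # Build per-sample tuples of (input image, input semantics, reference image,
    # reference semantics); slots that are not used in the chosen mode are
    # padded with '' placeholders so the loop below can unpack uniformly.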
    if use_seg:
        samples = [
            input_images, number_of_objects * [''], reference_images,
            number_of_objects * ['']
        ]
    else:
        samples = [
            number_of_objects * [''], input_semantics, reference_images,
            reference_semantics
        ]
    for input_img, input_sem, ref_img, ref_sem in zip(*samples):
        if use_seg:
            in_img = cv2.imread(input_img)
            if in_img is None:
                raise IOError('Image {} cannot be read'.format(input_img))
            input_sem = get_mask_from_image(in_img, seg_model)
            r_img = cv2.imread(ref_img)
            if r_img is None:
                raise IOError('Image {} cannot be read'.format(ref_img))
            ref_sem = get_mask_from_image(r_img, seg_model)
        else:
            input_sem_file = input_sem
            input_sem = cv2.imread(input_sem_file, cv2.IMREAD_GRAYSCALE)
            if input_sem is None:
                raise IOError('Image {} cannot be read'.format(input_sem_file))
            ref_sem_file = ref_sem
            ref_sem = cv2.imread(ref_sem_file, cv2.IMREAD_GRAYSCALE)
            if ref_sem is None:
                raise IOError('Image {} cannot be read'.format(ref_sem_file))
        input_sem = preprocess_semantics(
            input_sem, input_size=gan_model.input_semantic_size)
        ref_img_file = ref_img
        ref_img = cv2.imread(ref_img_file)
        if ref_img is None:
            raise IOError('Image {} cannot be read'.format(ref_img_file))
        ref_img = preprocess_image(ref_img,
                                   input_size=gan_model.input_image_size)
        ref_sem = preprocess_semantics(
            ref_sem, input_size=gan_model.input_semantic_size)
        input_dict = {
            'input_semantics': input_sem,
            'reference_image': ref_img,
            'reference_semantics': ref_sem
        }
        input_data.append(input_dict)

    log.info("Inference for input")
    outs = [gan_model.infer(**data) for data in input_data]

    log.info("Postprocessing for result")
    results = [postprocess(out) for out in outs]

    save_result(results, args.output_dir)
    log.info("Result image was saved to {}".format(args.output_dir))
import logging as log
import sys

import cv2
from openvino.runtime import Core, get_version

# The same demo-local helpers as in Variant 1 are assumed to be importable here.

log.basicConfig(format="[ %(levelname)s ] %(message)s",
                level=log.INFO,
                stream=sys.stdout)


def main():
    args = build_argparser().parse_args()

    log.info('OpenVINO Inference Engine')
    log.info('\tbuild: {}'.format(get_version()))
    core = Core()

    log.info('Reading Translation model {}'.format(args.translation_model))
    gan_model = CocosnetModel(core, args.translation_model, args.device)
    log.info('The Translation model {} is loaded to {}'.format(
        args.translation_model, args.device))

    seg_model = None
    if args.segmentation_model:
        log.info('Reading Semantic Segmentation model {}'.format(
            args.segmentation_model))
        seg_model = SegmentationModel(core, args.segmentation_model,
                                      args.device)
        log.info('The Semantic Segmentation model {} is loaded to {}'.format(
            args.segmentation_model, args.device))

    input_data = []
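    # Input-mode selection, identical to Variant 1: either raw images plus a
    # segmentation model, or precomputed semantic maps.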
    use_seg = bool(args.input_images) and bool(args.segmentation_model)
    use_semantics = bool(args.input_semantics) and bool(args.reference_semantics)
    assert use_seg ^ use_semantics, "Don't know where to get data"
    input_images = get_files(args.input_images)
    input_semantics = get_files(args.input_semantics)
    reference_images = get_files(args.reference_images)
    reference_semantics = get_files(args.reference_semantics)
    number_of_objects = len(reference_images)

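    # Per-sample tuples padded with '' placeholders, as in Variant 1.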
    if use_seg:
        samples = [
            input_images, number_of_objects * [''], reference_images,
            number_of_objects * ['']
        ]
    else:
        samples = [
            number_of_objects * [''], input_semantics, reference_images,
            reference_semantics
        ]
    for input_img, input_sem, ref_img, ref_sem in zip(*samples):
        if use_seg:
            in_img = cv2.imread(input_img)
            if in_img is None:
                raise IOError('Image {} cannot be read'.format(input_img))
            input_sem = get_mask_from_image(in_img, seg_model)
            r_img = cv2.imread(ref_img)
            if r_img is None:
                raise IOError('Image {} cannot be read'.format(ref_img))
            ref_sem = get_mask_from_image(r_img, seg_model)
        else:
            input_sem_file = input_sem
            input_sem = cv2.imread(input_sem_file, cv2.IMREAD_GRAYSCALE)
            if input_sem is None:
                raise IOError('Image {} cannot be read'.format(input_sem_file))
            ref_sem_file = ref_sem
            ref_sem = cv2.imread(ref_sem_file, cv2.IMREAD_GRAYSCALE)
            if ref_sem is None:
                raise IOError('Image {} cannot be read'.format(ref_sem_file))
        input_sem = preprocess_semantics(
            input_sem, input_size=gan_model.input_semantic_size)
        ref_img_file = ref_img
        ref_img = cv2.imread(ref_img_file)
        if ref_img is None:
            raise IOError('Image {} cannot be read'.format(ref_img_file))
        ref_img = preprocess_image(ref_img,
                                   input_size=gan_model.input_image_size)
        ref_sem = preprocess_semantics(
            ref_sem, input_size=gan_model.input_semantic_size)
        input_dict = {
            'input_semantics': input_sem,
            'reference_image': ref_img,
            'reference_semantics': ref_sem
        }
        input_data.append(input_dict)

    outs = [gan_model.infer(**data) for data in input_data]

    results = [postprocess(out) for out in outs]

    save_result(results, args.output_dir)
    log.info("Result image was saved to {}".format(args.output_dir))