def load_checkpoints(self, checkpoint_paths, models):
    """
    Either loads a single checkpoint or loads an ensemble of checkpoints
    from `checkpoint_paths`.
    """
    assert isinstance(checkpoint_paths, list), \
        "Make sure checkpoint_paths is specified in list format in the " \
        "yaml file."
    assert isinstance(models, list), \
        "Make sure model_names is specified in list format in the " \
        "yaml file."
    assert len(checkpoint_paths) == len(models), \
        "The number of checkpoints and models should be the same."
    # single model instance
    if len(checkpoint_paths) == 1:
        try:
            # try loading as a traced (TorchScript) model first
            self.model = load(checkpoint_paths[0]).cuda().eval()
            print(f"Traced model from {checkpoint_paths[0]}")
        except Exception:
            # fall back to loading plain weights into the provided model
            self.model = load_weights_infer(checkpoint_paths[0],
                                            models[0]).cuda().eval()
            print(f"Loaded model from {checkpoint_paths[0]}")
    # ensembled models
    elif len(checkpoint_paths) > 1:
        try:
            self.model = EnsembleModel(
                [load(path).cuda().eval() for path in checkpoint_paths])
        except Exception:
            self.model = EnsembleModel([
                load_weights_infer(path, model).cuda().eval()
                for (path, model) in zip(checkpoint_paths, models)
            ])
        print(f"Loaded an ensemble from {checkpoint_paths}")
Example #2
def runPose(image, boxList, video_name=''):
    parser = argparse.ArgumentParser(
        description='''Lightweight human pose estimation python demo.
                           This is just for quick results preview.
                           Please, consider c++ demo for the best performance.'''
    )
    parser.add_argument('--checkpoint-path',
                        type=str,
                        default='openpose.jit',
                        help='path to the checkpoint')
    parser.add_argument('--height-size',
                        type=int,
                        default=256,
                        help='network input layer height size')
    parser.add_argument('--video',
                        type=str,
                        default='',
                        help='path to video file or camera id')
    parser.add_argument('--images',
                        nargs='+',
                        default='./data/test1',
                        help='path to input image(s)')
    parser.add_argument('--cpu',
                        action='store_true',
                        help='run network inference on cpu')
    parser.add_argument('--device',
                        default='0',
                        help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--code_name',
                        type=str,
                        default='None',
                        help='the name of video')
    # parser.add_argument('--track', type=int, default=0, help='track pose id in video')
    # parser.add_argument('--smooth', type=int, default=1, help='smooth pose keypoints')
    args = parser.parse_args()

    args.images = image
    if video_name != '':
        args.code_name = video_name

    if args.video == '' and args.images == '':
        raise ValueError('Either --video or --images has to be provided')

    # NOTE: the checkpoint path is hardcoded here; the --checkpoint-path flag above is unused
    net = jit.load(r'.\weights\openpose.jit')

    # *************************************************************************
    action_net = jit.load(r'.\action_detect\checkPoint\action.jit')
    # ************************************************************************
    frame_provider = [image]

    run_demo(net, action_net, frame_provider, args.height_size, args.cpu,
             boxList)
Example #3
def detect_main(video_source='', image_source='', video_name=''):
    parser = argparse.ArgumentParser(
        description='''Lightweight human pose estimation python demo.
                           This is just for quick results preview.
                           Please, consider c++ demo for the best performance.''')
    parser.add_argument('--checkpoint-path', type=str, default='openpose.jit',
                        help='path to the checkpoint')
    parser.add_argument('--height-size', type=int, default=256, help='network input layer height size')
    parser.add_argument('--video', type=str, default='', help='path to video file or camera id')
    parser.add_argument('--images', nargs='+',
                        default='',
                        help='path to input image(s)')
    parser.add_argument('--cpu', action='store_true', help='run network inference on cpu')
    parser.add_argument('--code_name', type=str, default='None', help='the name of video')
    # parser.add_argument('--track', type=int, default=0, help='track pose id in video')
    # parser.add_argument('--smooth', type=int, default=1, help='smooth pose keypoints')
    args = parser.parse_args()

    args.video = video_source
    args.images = image_source
    if video_name != '':
        args.code_name = video_name

    if args.video == '' and args.images == '':
        raise ValueError('Either --video or --images has to be provided')

    net = jit.load(r'.\weights\openpose.jit')

    # *************************************************************************
    action_net = jit.load(r'.\action_detect\checkPoint\action.jit')
    # ************************************************************************

    frame_provider = ImageReader(args.images)
    if args.video != '':
        frame_provider = VideoReader(args.video, args.code_name)
        # print(frame_provider)
    else:
        images_dir = []
        if os.path.isdir(args.images):
            for img_dir in os.listdir(args.images):
                images_dir.append(os.path.join(args.images, img_dir))
            frame_provider = ImageReader(images_dir)
        else:
            img = cv2.imread(args.images, cv2.IMREAD_COLOR)
            frame_provider = [img]

        # *************************************************************************

        # args.track = 0

    run_demo(net, action_net, frame_provider, args.height_size, args.cpu)
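For reference, a hedged sketch of how detect_main might be invoked (the paths are placeholders):

detect_main(video_source='demo.mp4', video_name='demo')  # run on a video
detect_main(image_source='./data/test1')                 # run on an image file or directory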
Example #4
def load_checkpoints(checkpoint_paths, models):
    """Loads checkpoints.

    Either loads a single checkpoint or loads an ensemble of checkpoints
    from `checkpoint_paths`

    Args:
        checkpoint_paths (list[str]): list of paths to checkpoints
        models (list[torch.nn.Module]): models corresponding to the
            checkpoint_paths. 
            If it is left as [], the function assumes that the weights
            are traced (so no models are needed).

    Returns:
        model (torch.nn.Module): The single/ensembled model

    """
    assert isinstance(checkpoint_paths, list), \
        "Make sure checkpoint_paths is specified in list format in the " \
        "yaml file."
    assert isinstance(models, list), \
        "Make sure model_names is specified in list format in the " \
        "yaml file."

    if len(models) != 0:
        assert len(checkpoint_paths) == len(models), \
            "The number of checkpoints and models should be the same."

    # single model instances
    if len(checkpoint_paths) == 1:
        try:
            # try loading as a traced (TorchScript) model first
            model = load(checkpoint_paths[0]).cuda().eval()
            print(f"Traced model from {checkpoint_paths[0]}")
        except Exception:
            # fall back to loading plain weights into the provided model
            model = load_weights(checkpoint_paths[0], models[0]).cuda().eval()
            print(f"Loaded model from {checkpoint_paths[0]}")
    # ensembled models
    elif len(checkpoint_paths) > 1:
        try:
            model = EnsembleModel(
                [load(path).cuda().eval() for path in checkpoint_paths])
        except Exception:
            model = EnsembleModel([
                load_weights(path, model).cuda().eval()
                for (path, model) in zip(checkpoint_paths, models)
            ])
        print(f"Loaded an ensemble from {checkpoint_paths}")
    return model
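A hedged usage sketch of the standalone function (paths and the MyNet class are placeholders; per the docstring, passing models=[] assumes the checkpoints are traced):

# traced checkpoints: no model instances needed
model = load_checkpoints(["weights/traced_fold0.pt"], [])
# plain weight files: supply one model instance per checkpoint
model = load_checkpoints(["weights/fold0.pth"], [MyNet()])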
Example #5
def load_traced_model(
    model_path: Union[str, Path],
    device: Device = "cpu",
    opt_level: Optional[str] = None,
) -> ScriptModule:
    """Loads a traced model.

    Args:
        model_path: Path to traced model
        device (str): Torch device
        opt_level (str): Apex FP16 init level, optional

    Returns:
        (ScriptModule): Traced model
    """
    # jit.load doesn't work with pathlib.Path
    model_path = str(model_path)

    if opt_level is not None:
        device = "cuda"

    model = load(model_path, map_location=device)

    if opt_level is not None:
        assert_fp16_available()
        from apex import amp

        model = amp.initialize(model, optimizers=None, opt_level=opt_level)

    return model
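For context, a traced model of the kind this helper expects can be produced with torch.jit.trace and torch.jit.save; a minimal sketch under that assumption (the model and file name are placeholders):

import torch

model = torch.nn.Linear(4, 2).eval()
traced = torch.jit.trace(model, torch.randn(1, 4))  # record the forward pass
torch.jit.save(traced, "linear_traced.pt")

restored = load_traced_model("linear_traced.pt", device="cpu")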
Example #6
 def _get_torch_script_model(model_path):
     try:
         from torch import jit
     except ImportError:
         raise MissingDependencyException(
             '"torch" package is required for inference with '
             'PytorchLightningModelArtifact')
     return jit.load(model_path)
Example #7
 def load(cls, dirname, model_class=None):
     if os.path.exists(os.path.join(dirname, cls.JIT_MODEL_NAME)):
         model = jit.load(os.path.join(dirname, cls.JIT_MODEL_NAME),
                          map_location='cpu')
     else:
         assert model_class is not None
         model = model_class.from_trained(dirname)
     trainer_states = torch.load(os.path.join(dirname,
                                              cls.TRAINER_STATES_NAME),
                                 map_location='cpu')
     return cls(model, trainer_states)
Example #8
 def __init__(self):
     self.net = load("torch_script_eval.pt")
     normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                      std=[0.229, 0.224, 0.225])
     self.transforms = transforms.Compose([
         transforms.Resize(256),
         transforms.CenterCrop(224),
         transforms.ToTensor(),
         normalize,
     ])
     self.category = "airplane airport baseball_diamond basketball_court beach bridge chaparral church circular_farmland cloud commercial_area dense_residential desert forest freeway golf_course ground_track_field harbor industrial_area intersection island lake meadow medium_residential mobile_home_park mountain overpass palace parking_lot railway railway_station rectangular_farmland river roundabout runway sea_ice ship snowberg sparse_residential stadium storage_tank tennis_court terrace thermal_power_station wetland".split(
     )
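A hedged sketch of how this classifier might be called on an image; the predict helper and the PIL usage are assumptions, not part of the original class:

import torch
from PIL import Image

def predict(classifier, image_path):
    img = Image.open(image_path).convert("RGB")
    x = classifier.transforms(img).unsqueeze(0)  # add a batch dimension
    with torch.no_grad():
        idx = classifier.net(x).argmax(dim=1).item()
    return classifier.category[idx]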
Example #9
def infer(x: Dataset,
          file: str,
          batch_size: int = 1,
          batch_mode: bool = False,
          use_logistic: bool = True,
          num_workers: int = 1):
    loader = DataLoader(x,
                        batch_size=batch_size,
                        shuffle=False,
                        num_workers=num_workers,
                        pin_memory=True)
    model = load(file).cuda()
    if batch_mode:
        return _infer_batch(model, loader, use_logistic=use_logistic)

    return _infer(model, loader, use_logistic=use_logistic)
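Hypothetical usage, assuming a CUDA device, a traced model file, and any map-style Dataset (TensorDataset stands in here; _infer and _infer_batch come from the surrounding module):

import torch
from torch.utils.data import TensorDataset

ds = TensorDataset(torch.randn(16, 3, 224, 224))
preds = infer(ds, "model_traced.pt", batch_size=4, num_workers=0)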
Example #10
    def __init__(self,
                 file: str,
                 batch_size: int,
                 batch_mode: bool = False,
                 use_logistic: bool = False,
                 num_workers: int = 1,
                 *args,
                 **kwargs):
        super().__init__(*args, **kwargs)

        self.model = load(file).cuda()

        self.batch_size = batch_size
        self.use_logistic = use_logistic
        self.batch_mode = batch_mode
        self.num_workers = num_workers
Example #11
def init_model():
    """
    Load the saved quantized model for inference.
    """
    # Initialize the Model using the saved Quantized Model
    torch.backends.quantized.engine = 'qnnpack'
    print('\n\nInitializing Model...')
    model = jit.load('models/convnet-traced-new.pt', map_location='cpu')
    print('Model Loaded Successfully')
    # try:

    # except Exception as e:
    #     print(e)
    #     print('ERROR: Could not Initialize the Model.')
    #     sys.exit(1)

    return model
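One plausible way a checkpoint like 'models/convnet-traced-new.pt' could have been produced, sketched with dynamic quantization (the float model here is a stand-in, not the original convnet):

import torch

torch.backends.quantized.engine = 'qnnpack'
float_model = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.ReLU()).eval()
qmodel = torch.quantization.quantize_dynamic(
    float_model, {torch.nn.Linear}, dtype=torch.qint8)
traced = torch.jit.trace(qmodel, torch.randn(1, 8))
torch.jit.save(traced, 'models/convnet-traced-new.pt')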
Example #12
def infer(
    x: Dataset,
    file: str,
    batch_size: int = 1,
    batch_mode: bool = False,
    activation=None,
    num_workers: int = 1,
):
    loader = DataLoader(x,
                        batch_size=batch_size,
                        shuffle=False,
                        num_workers=num_workers,
                        pin_memory=True)
    model = load(file).cuda()
    if batch_mode:
        return _infer_batch(model, loader, activation=activation)

    return _infer(model, loader, activation=activation)
Example #13
def load(path, eval=True):
    path = Path(path)
    p = Predictor()

    # TODO: read from tarfile directly instead
    if str(path).endswith('.tar.gz'):
        untar(path)
        # str.rstrip removes a *set of characters*, not a suffix, so slice off the extension instead
        path = Path(str(path)[:-len('.tar.gz')])

    p.model = None
    if (path / 'model.th').exists():
        p.model = torch.load(str(path / 'model.th'))
    elif (path / 'model.traced.th').exists():
        from torch import jit
        p.model = jit.load(str(path / 'model.traced.th'))

    if p.model and eval:
        p.model.eval()

    with suppress(FileNotFoundError):
        p.model_state_dict = torch.load(str(path / 'model.state_dict.th'))

    with suppress(FileNotFoundError):
        p.classes = load_json(path / 'classes.json')

    with suppress(FileNotFoundError):
        p.preprocess = load_pickle(path / 'preprocess.pkl')

    with suppress(FileNotFoundError):
        p.postprocess = load_pickle(path / 'postprocess.pkl')

    with suppress(FileNotFoundError):
        p.predict = load_pickle(path / 'predict.pkl')

    with suppress(FileNotFoundError):
        p.meta = load_json(path / 'meta.json')

    with suppress(FileNotFoundError):
        p.kwargs = load_pickle(path / 'kwargs.pkl')

    return p
Example #14
        'frame': row.frame,
        'path': os.path.join(path_test_dir, row.path)
    } for _, row in test_dataset_paths.iterrows()]

    dataset = TestAntispoofDataset(paths=paths)
    dataloader = DataLoader(dataset,
                            batch_size=BATCH_SIZE,
                            shuffle=False,
                            num_workers=4)

    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    # predict
    samples, probabilities = [], []

    models = [load(x).to(device) for x in glob('*.trcd')]

    with torch.no_grad():
        for video, batch in tqdm(dataloader):
            batch = batch.to(device)

            for model in models:
                proba = torch.softmax(model(batch), dim=1).cpu().numpy()
                proba = proba[:, :-1].sum(axis=1)
                samples.extend(video)
                probabilities.extend(proba)

    # save
    predictions = pd.DataFrame.from_dict({
        'id': samples,
        'probability': probabilities
Example #15
def detect(save_img=False):
    global ip
    source, weights, view_img, save_txt, imgsz = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size
    webcam = source.isnumeric() or source.endswith(
        '.txt') or source.lower().startswith(('rtsp://', 'rtmp://', 'http://'))

    # Directories
    save_dir = Path(
        increment_path(Path(opt.project) / opt.name,
                       exist_ok=opt.exist_ok))  # increment run
    (save_dir / 'labels' if save_txt else save_dir).mkdir(
        parents=True, exist_ok=True)  # make dir

    # Load the fall-detection models
    print("Loading fall-detection models...")
    net = jit.load(r'.\weights\openpose.jit')
    action_net = jit.load(r'.\action_detect\checkPoint\action.jit')
    print("加载摔倒检测的模型结束")
    # Initialize
    set_logging()
    # Get the device
    device = select_device(opt.device)
    # Use FP16 (half precision) if the device is a GPU
    half = device.type != 'cpu'  # half precision only supported on CUDA
    half = False  # forced off here
    # Load model
    # Load the FP32 model; make sure the user-set input resolution is divisible by 32 (adjust it if not)
    model = attempt_load(weights, map_location=device)  # load FP32 model
    imgsz = check_img_size(imgsz, s=model.stride.max())  # check img_size
    # Switch to FP16
    if half:
        model.half()  # to FP16

    # Set Dataloader
    # Pick the data-loading method based on the input source
    vid_path, vid_writer = None, None
    if webcam:
        view_img = True
        cudnn.benchmark = True  # set True to speed up constant image size inference
        dataset = LoadStreams(source, img_size=imgsz)
    else:
        save_img = True
        # To display the video while detecting, you can add view_img = True here
        view_img = True
        dataset = LoadImages(source, img_size=imgsz)

    # Get names and colors
    # Get the class names
    names = model.module.names if hasattr(model, 'module') else model.names
    # Set the bounding-box colors
    colors = [[random.randint(0, 255) for _ in range(3)] for _ in names]

    # Run inference
    t0 = time.time()
    # Run a single forward pass to check that the program works
    img = torch.zeros((1, 3, imgsz, imgsz), device=device)  # init img
    _ = model(img.half() if half else img
              ) if device.type != 'cpu' else None  # run once
    """
        path 图片/视频路径
        img 进行resize+pad之后的图片
        img0 原size图片
        cap 当读取图片时为None,读取视频时为视频源
        """
    for path, img, im0s, vid_cap in dataset:
        img = torch.from_numpy(img).to(device)
        # Cast the image to FP16 as well
        img = img.half() if half else img.float()  # uint8 to fp16/32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        # If there is no batch dimension, add one at the front
        if img.ndimension() == 3:
            img = img.unsqueeze(0)

        # Inference
        t1 = time_synchronized()
        """
               前向传播 返回pred的shape是(1, num_boxes, 5+num_class)
               h,w为传入网络图片的长和宽,注意dataset在检测时使用了矩形推理,所以这里h不一定等于w
               num_boxes = h/32 * w/32 + h/16 * w/16 + h/8 * w/8
               pred[..., 0:4]为预测框坐标
               预测框坐标为xywh(中心点+宽长)格式
               pred[..., 4]为objectness置信度
               pred[..., 5:-1]为分类结果
               """
        pred = model(img, augment=opt.augment)[0]

        # Apply NMS
        """
               pred:前向传播的输出
               conf_thres:置信度阈值
               iou_thres:iou阈值
               classes:是否只保留特定的类别
               agnostic:进行nms是否也去除不同类别之间的框
               经过nms之后,预测框格式:xywh-->xyxy(左上角右下角)
               pred是一个列表list[torch.tensor],长度为batch_size
               每一个torch.tensor的shape为(num_boxes, 6),内容为box+conf+cls
               """
        pred = non_max_suppression(pred,
                                   opt.conf_thres,
                                   opt.iou_thres,
                                   classes=opt.classes,
                                   agnostic=opt.agnostic_nms)
        t2 = time_synchronized()

        # Apply Classifier
        # Optional second-stage classifier, unused by default
        # if classify:
        #     pred = apply_classifier(pred, modelc, img, im0s)

        # Process detections
        # Process each image
        for i, det in enumerate(pred):  # detections per image
            # If the input source is a webcam, batch_size is not 1; take one image out of the dataset
            if webcam:  # batch_size >= 1
                p, s, im0, frame = path[i], '%g: ' % i, im0s[i].copy(
                ), dataset.count
            else:
                p, s, im0, frame = path, '', im0s, getattr(dataset, 'frame', 0)
            # print(det)

            boxList = []  # list of boxes handed to OpenPose
            p = Path(p)  # to Path
            # Set the path for saving images/videos
            save_path = str(save_dir / p.name)  # img.jpg
            # Set the path of the txt file the boxes are saved to
            txt_path = str(save_dir / 'labels' / p.stem) + (
                '' if dataset.mode == 'image' else f'_{frame}')  # img.txt
            # Set the info string to print (image width and height)
            s += '%gx%g ' % img.shape[2:]  # print string
            gn = torch.tensor(im0.shape)[[1, 0, 1,
                                          0]]  # normalization gain whwh
            if len(det):
                # Rescale boxes from img_size to im0 size
                # Rescale the box coordinates: from the resized+padded image back to the original image size
                # coordinates are in xyxy format at this point
                det[:, :4] = scale_coords(img.shape[2:], det[:, :4],
                                          im0.shape).round()

                # Print results
                # Print the number of detections per class
                for c in det[:, -1].unique():
                    n = (det[:, -1] == c).sum()  # detections per class
                    s += f'{n} {names[int(c)]}s, '  # add to string

                # Write results
                # Save the predictions
                for *xyxy, conf, cls in reversed(det):
                    if save_txt:  # Write to file
                        # Convert xyxy (top-left + bottom-right) to xywh (center + width/height), normalize by w and h, and convert to a list before saving
                        xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) /
                                gn).view(-1).tolist()  # normalized xywh
                        line = (cls, *xywh, conf) if opt.save_conf else (
                            cls, *xywh)  # label format
                        with open(txt_path + '.txt', 'a') as f:
                            f.write(('%g ' * len(line)).rstrip() % line + '\n')
                    # Draw the box on the original image
                    if save_img or view_img:  # Add bbox to image
                        label = f'{names[int(cls)]} {conf:.2f}'
                        plot_one_box(xyxy,
                                     im0,
                                     label=label,
                                     color=colors[int(cls)],
                                     line_thickness=3)
                        boxList.append([
                            int(xyxy[0]),
                            int(xyxy[1]),
                            int(xyxy[2]),
                            int(xyxy[3])
                        ])

            print(boxList)
            for box in boxList:
                c1, c2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3]))
                x = c2[0] - c1[0]
                y = c2[1] - c1[1]
                if x / y >= 0.8:  # an aspect ratio >= 0.8 may indicate a fall
                    print('Running human pose estimation')
                    runOpenpose.run_demo(
                        net, action_net, im0, 256, True,
                        boxList)  # pose estimation: pass the frame and the YOLOv5 person boxes to OpenPose
                    break
                    break
            # Print time (inference + NMS)
            # Print forward-pass + NMS time
            print(f'{s}Done. ({t2 - t1:.3f}s)')

            if save_img:
                if dataset.mode == 'image':
                    imageName = str(
                        time.strftime('%Y%m%d%H%M%S',
                                      time.localtime(time.time()))) + ".jpg"
                    imagePath = r"/photo/ism_uniform/" + str(ip) + "/" + str(
                        datetime.datetime.now().strftime("%Y-%m-%d")) + "/"
                    # create the per-ip, per-date directory in one step
                    os.makedirs(imagePath, exist_ok=True)
                    cv2.imwrite(imagePath + imageName, im0)
                else:  # 'video'
                    if vid_path != save_path:  # new video
                        vid_path = save_path
                        if isinstance(vid_writer, cv2.VideoWriter):
                            vid_writer.release(
                            )  # release previous video writer
                        fourcc = 'mp4v'  # output video codec
                        fps = vid_cap.get(cv2.CAP_PROP_FPS)
                        w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                        h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                        vid_writer = cv2.VideoWriter(
                            save_path, cv2.VideoWriter_fourcc(*fourcc), fps,
                            (w, h))
                    vid_writer.write(im0)
                print('Image saved')

    # Print the total time
    print(f'Done. ({time.time() - t0:.3f}s)')
Example #16
parser = argparse.ArgumentParser()
parser.add_argument("image", type=str, help="path of signature image")
parser.add_argument(
    "--scan",
    dest="scan",
    action="store_true",
    default=False,
    help="add a scan filter to image while processing (default: False)")
args = parser.parse_args()

# Image Transformations
transform = transforms.Compose([
    transforms.Resize((256, 256)),
    # transforms.RandomVerticalFlip(),
    transforms.ToTensor(),
    transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
])

# Loading Jitted Model
model = torchjit.load("modeljit.pth")
model.eval()

# Making Predictions
classes = ["fake", "real"]
img = processimage(args.image, scan=args.scan)
img = transform(img).float().unsqueeze(0)
output = model(img)
index = output.data.numpy().argmax()

print(classes[index])
Example #17
import PIL.Image
import json

from IPython.display import Image, display

with open('./imagenet_classes.json', mode='rt') as f:
    CLASS_DICT = json.load(f)

IMAGENET_MEAN = np.array([[[0.485, 0.456, 0.406]]], dtype=np.float32)
IMAGENET_STD = np.array([[[0.229, 0.224, 0.225]]], dtype=np.float32)


def predict_imagenet(net, image_file):
    # Load image and resize
    image = PIL.Image.open(image_file).resize((224, 224))  # 224x224 is the standard ImageNet input size
    # Convert to numpy and normalize
    image = np.array(image, dtype=np.float32) / 255.
    image = (image - IMAGENET_MEAN) / IMAGENET_STD
    # Convert to PyTorch and make channel first
    image = torch.as_tensor(image).unsqueeze(0).permute(0, 3, 1, 2)
    # Predict top class
    logits = net(image)
    class_idx = logits.squeeze(0).argmax().item()
    # Output predictions
    print('It is a %s.' % CLASS_DICT[str(class_idx)])
    display(Image(filename=image_file))


net = jit.load('./model.pth')
predict_imagenet(net, './whats_this.jpg')
print('https://i.ytimg.com/vi/2fb-g_V-UT4/hqdefault.jpg')
Example #18
    def __init__(self, file: str, batch_size: int, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.model = load(file)
        self.batch_size = batch_size
Example #19
tokenizer = T5Tokenizer.from_pretrained('t5-small')

init_token = tokenizer.pad_token
eos_token = tokenizer.eos_token
pad_token = tokenizer.pad_token
unk_token = tokenizer.unk_token

init_token_idx = tokenizer.convert_tokens_to_ids(init_token)
eos_token_idx = tokenizer.convert_tokens_to_ids(eos_token)
pad_token_idx = tokenizer.convert_tokens_to_ids(pad_token)
unk_token_idx = tokenizer.convert_tokens_to_ids(unk_token)

max_input_length = tokenizer.max_model_input_sizes['t5-small']

new_model = jit.load('t5_ts_qa_model.zip')


def translate_sentence2(sentence, eval_model, max_len=50):

    eval_model.eval()
    eval_model = eval_model.float()

    src_indexes = [init_token_idx] + sentence + [eos_token_idx]

    src_tensor = torch.LongTensor(src_indexes).unsqueeze(0)

    trg_indexes = [init_token_idx]

    for i in range(max_len):
Example #20
from tqdm import tqdm_notebook
import pandas as pd
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.jit import load
from torch.nn.parameter import Parameter
from torch.nn import functional as F
from model import *

from mlcomp.contrib.transform.albumentations import ChannelTranspose
from mlcomp.contrib.dataset.classify import ImageDataset
from mlcomp.contrib.transform.rle import rle2mask, mask2rle
from mlcomp.contrib.transform.tta import TtaWrap

unet_se_resnext = load(
    '/kaggle/input/pretrainedmodel/unet_se_resnext50_32x4d.pth').cuda()
unet_mobilenet = load(
    '/kaggle/input/pretrainedmodel/unet_mobilenet2.pth').cuda()
unet_resnet = load('/kaggle/input/pretrainedmodel/unet_resnet34.pth').cuda()

model = Model([unet_se_resnext, unet_mobilenet, unet_resnet])
model_cls = nn.DataParallel(Net(num_class=4))
checkpoint = torch.load(
    '/kaggle/input/pretrainedmodel/resnet_cls_model_ver2.pth',
    map_location='cuda')
model_cls.load_state_dict(checkpoint["state_dict"])
model_cls = model_cls.eval()


def create_transforms(additional):
    res = list(additional)
Example #21
import cv2
import albumentations as A
from tqdm import tqdm_notebook
import pandas as pd

import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.jit import load

from mlcomp.contrib.transform.albumentations import ChannelTranspose
from mlcomp.contrib.dataset.classify import ImageDataset
from mlcomp.contrib.transform.rle import rle2mask, mask2rle
from mlcomp.contrib.transform.tta import TtaWrap
unet_se_resnext50_32x4d = \
    load('/kaggle/input/severstalmodels/unet_se_resnext50_32x4d.pth').cuda()
unet_mobilenet2 = load(
    '/kaggle/input/severstalmodels/unet_mobilenet2.pth').cuda()
unet_resnet34 = load('/kaggle/input/severstalmodels/unet_resnet34.pth').cuda()


class Model:
    def __init__(self, models):
        self.models = models

    def __call__(self, x):
        res = []
        x = x.cuda()
        with torch.no_grad():
            for m in self.models:
                res.append(m(x))
Example #22
import torch
from torch import jit
import cv2
import time

# Load the torchscript model
model = jit.load('/models/run10/model.zip')
# Set the model to evaluate mode
model = model.cuda().eval()

# Export ONNX format
x = torch.ones(1, 3, 320, 640, requires_grad=True).cuda()
#torch.onnx.export(model,x,'/home/brian/models/run10/jetracer.onnx',export_params=True,opset_version=11,do_constant_folding=True)
torch.onnx.export(model,
                  x,
                  '/home/brian/models/run10/jetracer.onnx',
                  export_params=True)

ino = 536
# Read a sample image and mask from the dataset
img = cv2.imread(f'/models/train_data/Images/{ino:03d}.jpg').transpose(
    2, 0, 1).reshape(1, 3, 320, 640)
mask = cv2.imread(f'/models/train_data/Masks/{ino:03d}_mask.png')

torch.cuda.current_stream().synchronize()
t0 = time.time()

with torch.no_grad():
    a = model(torch.from_numpy(img).type(torch.cuda.FloatTensor) / 255)

torch.cuda.current_stream().synchronize()
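A hedged sanity check of the exported ONNX model (assumes the onnxruntime package is installed; reuses the img array loaded above):

import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession('/home/brian/models/run10/jetracer.onnx',
                            providers=['CPUExecutionProvider'])
input_name = sess.get_inputs()[0].name
onnx_out = sess.run(None, {input_name: img.astype(np.float32) / 255.0})[0]
print(onnx_out.shape)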
Example #23
 def _load_model(self):
     PATH = self._find_with_extension(EXTENSION)
     self.model = jit.load(PATH)
Example #24
    def segmentation(self, data):
        """
        Perform semantic segmentation
        Parameters
        ----------
        data: dict containing:
            input_files: str path to input layers
            neural_network: str path to neural network
            output_file: str path where to save the output

        Returns
        -------
        The predicted mask if segmentation has been correctly performed. Raises an exception otherwise.
        """
        cfg = self.cfg["segmentation"]
        neural_net = data["neural_network"]
        print("\tStep 1: Load inputs.")
        if self.ssh_server:
            neural_net = self.ssh_server.get(neural_net, cache=True)

        if isinstance(self.cache, dict) and self.cache.get("net_file") == neural_net:
            net = self.cache["net"]
        else:
            net = jit.load(neural_net, map_location=self.device)
            if isinstance(self.cache, dict):
                self.cache["net"] = net
                self.cache["net_file"] = neural_net

        self.n_classes = find_n_classes(net)

        fp, inputs, nodata_mask = self._prepare_inputs(data, "segmentation")
        result = self._tile_fp(fp, cfg)
        if isinstance(result, str):
            return result
        else:
            tiled_fp = result
        # check the inputs
        sum_channs, channels_net = check_inputs_and_net(inputs, net)
        if sum_channs != channels_net:
            warn = f"Warning: {sum_channs} input channels while the chosen net expects {channels_net} channels."
            print_warning(warn)
            return warn
        print("\tStep 2: Inference")
        with torch.no_grad():
            pred = torch.zeros((self.n_classes, *fp.shape)).to(
                self.device, torch.double
            )
            # Slides a window across the image
            for batch_fps in make_batches(cfg["batch_size"], tiled_fp):
                inputs_patches = [
                    from_coord_to_patch(x, batch_fps, self.original_fp) for x in inputs
                ]
                outs = net(torch.cat(inputs_patches, dim=1)).data.to(torch.double)

                for out, sub_fp in zip(outs, batch_fps):
                    small_sub_slice = sub_fp.slice_in(fp, clip=True)
                    big_sub_slice = fp.slice_in(sub_fp, clip=True)
                    pred[:, small_sub_slice[0], small_sub_slice[1]] += out[
                        :, big_sub_slice[0], big_sub_slice[1]
                    ]

        mask = torch.argmax(pred, dim=0).cpu().numpy()
        print("\tStep 3: Save outputs")
        self._save_output(data, mask, nodata_mask, fp)
        return mask
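A hypothetical invocation of segmentation (the dict keys follow the docstring; "worker" and the paths are placeholders):

data = {
    "input_files": "tiles/rgb.tif",
    "neural_network": "nets/segmenter.pt",
    "output_file": "out/mask.tif",
}
mask = worker.segmentation(data)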
Example #25
    while not terminal:
        step += 1
        print()
        simulator.render(state)

        if step % 2 == 0:
            action = int(input("Action: "))
        else:
            root = alpha_zero.mcts(state,
                                   mask,
                                   simulator,
                                   network,
                                   config,
                                   root_node=root)
            action = np.random.choice(mask.shape[0], p=root.action_policy)

        state, _, terminal, _ = simulator.step(state, action)
        mask = simulator.action_space.as_discrete.action_mask(state)
        if root is not None:
            root = root.children[action]

    simulator.render(state)


if __name__ == "__main__":
    args = parser.parse_args()

    net = jit.load(os.path.join(args.save_path, "network.pt"))
    play(simulators.ConnectFour(), net, alpha_zero.MCTSConfig())
Example #26
import torch.nn.functional as F
from networks.imageunet import init_network
import cv2
import numpy as np
import pandas as pd
from torchvision import models
import albumentations as albu
from efficientnet_pytorch import EfficientNet
import segmentation_models_pytorch as smp
# model_params = {}
# model_params['architecture'] = "unet_se_resnext50_cbam_v0a"
# seg_model = init_network(model_params)
from torch.jit import load
from albumentations.pytorch.transforms import ToTensor
unet_se_resnext50_32x4d = load(
    'E:/pycharm_project/steel/baseline_model/unet_se_resnext50_32x4d.pth'
).cuda()
unet_se_resnext50_32x4d.eval()
unet_mobilenet2 = load(
    'E:/pycharm_project/steel/baseline_model/unet_mobilenet2.pth').cuda()
unet_mobilenet2.eval()
unet_resnet34 = load(
    'E:/pycharm_project/steel/baseline_model/unet_resnet34.pth').cuda()
unet_resnet34.eval()

# model_params = {}
# arch = "unet_se_resnext50_cbam_v0a"
# model_params['architecture'] = arch
# unet_se_resnext50_32x4d = init_network(model_params).cuda()
# unet_se_resnext50_32x4d.load_state_dict(torch.load('E:/pycharm_project/steel/code/save_model/lb89277.pth')['state_dict'])
mymodel = smp.Unet("efficientnet-b0",
Example #27
import cv2
import albumentations as A
from tqdm import tqdm_notebook
import pandas as pd

import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.jit import load

from mlcomp.contrib.transform.albumentations import ChannelTranspose
from mlcomp.contrib.dataset.classify import ImageDataset
from mlcomp.contrib.transform.rle import rle2mask, mask2rle
from mlcomp.contrib.transform.tta import TtaWrap

unet_se_resnext50_32x4d = load(
    "/kaggle/input/severstalmodels/unet_se_resnext50_32x4d.pth").cuda()
unet_mobilenet2 = load(
    "/kaggle/input/severstalmodels/unet_mobilenet2.pth").cuda()
# unet_resnet34 = load('/kaggle/input/severstalmodels/unet_resnet34.pth').cuda()

import os
from segmentation_models_pytorch import Unet, FPN

ENCODER = "resnet34"
ENCODER_WEIGHTS = "imagenet"
DEVICE = "cuda"

CLASSES = ["0", "1", "2", "3", "4"]
ACTIVATION = "softmax"

unet_resnet34 = Unet(encoder_name=ENCODER,