Code Example #1
    def __init__(self, config, fold):

        if config["enet_type"] in Constant.RESNET_LIST:
            ModelClass = MultiLabelModel
        else:
            raise NotImplementedError()

        if config["eval"] == 'best':
            model_file = os.path.join(config["model_dir"], f'best_fold{fold}.pth')
        elif config["eval"] == 'final':
            model_file = os.path.join(config["model_dir"], f'final_fold{fold}.pth')
        else:
            raise ValueError(f'unknown eval mode: {config["eval"]}')
        self.model = ModelClass(
            config["enet_type"],
            config["out_dim1"],
            config["out_dim2"],
            pretrained=config["pretrained"])
        self.model = self.model.to(device)

        try:  # checkpoint saved from a single-GPU model
            self.model.load_state_dict(torch.load(model_file), strict=True)
        except RuntimeError:  # checkpoint saved via DataParallel; keys carry a 'module.' prefix
            state_dict = torch.load(model_file)
            state_dict = {k[7:] if k.startswith('module.') else k: v
                          for k, v in state_dict.items()}
            self.model.load_state_dict(state_dict, strict=True)
        self.model.eval()

        _, self.transforms_val = get_transforms(config["image_size"])  
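
The try/except fallback above handles checkpoints saved from torch.nn.DataParallel, whose parameter names all carry a 'module.' prefix. The same pattern repeats in every example below, so here is a minimal sketch of it factored into a reusable helper; the name load_checkpoint is hypothetical, not part of the original code:

import torch

def load_checkpoint(model, model_file, map_location='cpu'):
    # DataParallel checkpoints prefix every key with 'module.'; strip it so
    # the weights load into a plain single-GPU module. Stripping is a no-op
    # for checkpoints that never had the prefix, so both cases are covered
    # without a try/except.
    state_dict = torch.load(model_file, map_location=map_location)
    state_dict = {k[7:] if k.startswith('module.') else k: v
                  for k, v in state_dict.items()}
    model.load_state_dict(state_dict, strict=True)
    return model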
Code Example #2
    def __init__(self, config, fold):

        if config["enet_type"] in Constant.RESNEST_LIST:
            ModelClass = Resnest
        elif config["enet_type"] in Constant.SERESNEXT_LIST:
            ModelClass = SeResnext
        elif config["enet_type"] in Constant.GEFFNET_LIST:
            ModelClass = Effnet
        else:
            raise NotImplementedError()

        if config["eval"] == 'best':
            model_file = os.path.join(config["model_dir"],
                                      f'best_fold{fold}.pth')
        elif config["eval"] == 'final':
            model_file = os.path.join(config["model_dir"],
                                      f'final_fold{fold}.pth')
        else:
            raise ValueError(f'unknown eval mode: {config["eval"]}')
        self.model = ModelClass(enet_type=config["enet_type"],
                                out_dim=int(config["out_dim"]),
                                drop_nums=int(config["drop_nums"]),
                                metric_strategy=config["metric_strategy"])
        self.model = self.model.to(device)

        try:  # checkpoint saved from a single-GPU model
            self.model.load_state_dict(torch.load(model_file), strict=True)
        except RuntimeError:  # checkpoint saved via DataParallel; keys carry a 'module.' prefix
            state_dict = torch.load(model_file)
            state_dict = {
                k[7:] if k.startswith('module.') else k: v
                for k, v in state_dict.items()
            }
            self.model.load_state_dict(state_dict, strict=True)
        self.model.eval()

        _, self.transforms_val = get_transforms(config["image_size"])
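
For context, a hypothetical config dict for the constructor above; the keys match the code, but every value is illustrative only:

config = {
    'enet_type': 'resnest101',   # must appear in one of the Constant.*_LIST entries
    'eval': 'best',              # selects best_fold{fold}.pth over final_fold{fold}.pth
    'model_dir': './weights',
    'out_dim': 4,
    'drop_nums': 1,
    'metric_strategy': False,
    'image_size': 512,
}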
Code Example #3
def gen_onnx(args):
    # NOTE: `config`, `device`, and the constructed `model` are assumed to be
    # defined before this point; their setup is elided in this snippet.
    if config["eval"] == 'best':
        model_file = os.path.join(config["model_dir"], f'best_fold{args.fold}.pth')
    elif config["eval"] == 'final':
        model_file = os.path.join(config["model_dir"], f'final_fold{args.fold}.pth')
    else:
        raise ValueError(f'unknown eval mode: {config["eval"]}')


    try:  # checkpoint saved from a single-GPU model
        model.load_state_dict(torch.load(model_file), strict=True)
    except RuntimeError:  # checkpoint saved via DataParallel; keys carry a 'module.' prefix
        state_dict = torch.load(model_file)
        state_dict = {k[7:] if k.startswith('module.') else k: v
                      for k, v in state_dict.items()}
        model.load_state_dict(state_dict, strict=True)

    model.eval()

    print('model loaded OK')


    img = cv2.imread(args.img_path)
    _, transforms_val = get_transforms(config["image_size"])
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # OpenCV loads BGR; the model expects RGB
    res = transforms_val(image=img)
    img1 = res['image'].astype(np.float32)
    img1 = img1.transpose(2, 0, 1)  # HWC -> CHW
    img1 = torch.from_numpy(img1).unsqueeze(0).float()  # add batch dimension

    s = time.time()
    with torch.no_grad():
        out = model(img1.to(device))
        if isinstance(out, dict):  # unwrap before calling tensor methods on it
            out = out['f_score']
        probs = out.cpu().detach().numpy()
        print("probs:", probs)
    print('cost time:', time.time() - s)

    cv2.imwrite('./onnx/ori_output.jpg', out[0, 0].cpu().detach().numpy() * 255)

    output_onnx = args.save_path
    print("==> Exporting model to ONNX format at '{}'".format(output_onnx))
    input_names = ["input"]
    output_names = ["out"]
    dynamic_axes = {'input': {0: 'batch'}, 'out': {0: 'batch'}}
    inputs = torch.randn(args.batch_size, 3, 512, 512).to(device)
    torch.onnx.export(model, inputs, output_onnx,
                      input_names=input_names,
                      output_names=output_names,
                      dynamic_axes=dynamic_axes)
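
Once exported, the graph is worth sanity-checking against the PyTorch output. A minimal sketch, assuming onnxruntime is installed and reusing the img1 tensor and probs array computed in gen_onnx above; 'model.onnx' stands in for args.save_path:

import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession('model.onnx')  # stand-in for args.save_path
# Feed the same preprocessed image; 'input' matches input_names in the export.
onnx_out = sess.run(None, {'input': img1.numpy()})[0]
# Both runtimes should agree within floating-point tolerance.
np.testing.assert_allclose(probs, onnx_out, rtol=1e-3, atol=1e-5)
print('ONNX output matches PyTorch')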
Code Example #4
def main():

    df, df_test, mel_idx = get_df(config["data_dir"], config["auc_index"])

    transforms_train, transforms_val = get_transforms(config["image_size"])

    folds = [int(i) for i in config["fold"].split(',')]
    for fold in folds:
        run(fold, df, transforms_train, transforms_val, mel_idx)
Code Example #5
 def forward(self, img_path):
     try:
         img = cv2.imread(img_path)
         _, transforms_val = get_transforms(config["image_size"])
         img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # OpenCV loads BGR; the model expects RGB
         res = transforms_val(image=img)
         img1 = res['image'].astype(np.float32)
         img1 = img1.transpose(2, 0, 1)  # HWC -> CHW
         inputs = np.expand_dims(img1, axis=0)  # add batch dimension
         inputs = np.array(inputs, copy=True, dtype=np.float16)  # engine runs in fp16
         inp_batch = inputs.shape[0]
         if inp_batch < self.inputs_shape[0]:
             outputs = self.less_predict(inputs)
         elif inp_batch == self.inputs_shape[0]:
             print('batch size equal')
             outputs = self.engine.run([inputs])[0]
         else:
             print('inputs batch greater than engine inputs')
             outputs = []
             ixs = list(range(0, inp_batch,
                              self.inputs_shape[0])) + [inp_batch]
             # Walk the batch in engine-sized chunks; the short remainder
             # (if any) goes through less_predict.
             for lo, hi in zip(ixs[:-1], ixs[1:]):
                 inp = inputs[lo:hi, :]
                 if inp.shape[0] == self.inputs_shape[0]:
                     outs = self.engine.run([inp])[0]
                 else:
                     outs = self.less_predict(inp)
                 outputs.append(outs.copy())
             outputs = np.vstack(outputs)
         outputs = torch.tensor(outputs)
         print("outputs:", outputs)
         outputs = F.softmax(outputs, dim=1).cpu()
         score, class_prediction = torch.max(outputs, 1)
         return score, class_prediction
     except Exception:
         raise  # re-raise unchanged, preserving the original traceback
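
The branching above reduces to one idea: split the incoming batch along axis 0 into engine-sized chunks, run full chunks through the engine, and hand the short remainder to less_predict. A standalone sketch of that idea; chunked_run and the two callables are hypothetical names, not part of the original code:

import numpy as np

def chunked_run(inputs, engine_batch, run_full, run_partial):
    # Chunk boundaries: multiples of engine_batch, plus the final index.
    n = inputs.shape[0]
    bounds = list(range(0, n, engine_batch)) + [n]
    outputs = []
    for lo, hi in zip(bounds[:-1], bounds[1:]):
        chunk = inputs[lo:hi]
        if chunk.shape[0] == engine_batch:
            outputs.append(run_full(chunk))     # exact engine batch
        else:
            outputs.append(run_partial(chunk))  # short remainder
    return np.vstack(outputs)

In the method above this would be called as chunked_run(inputs, self.inputs_shape[0], lambda x: self.engine.run([x])[0], self.less_predict).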
Code Example #6
def main():

    df, df_test, mel_idx = get_df(config["data_dir"], config["auc_index"])

    _, transforms_val = get_transforms(int(config["image_size"]))

    LOGITS = []
    PROBS = []
    dfs = []
    for fold in range(args.n_splits):

        df_valid = df[df['fold'] == fold]

        dataset_valid = QDDataset(df_valid, 'valid', transform=transforms_val)
        valid_loader = torch.utils.data.DataLoader(
            dataset_valid,
            batch_size=int(config["batch_size"]),
            num_workers=int(config["num_workers"]))

        if config["eval"] == 'best':
            model_file = os.path.join(config["model_dir"],
                                      f'best_fold{fold}.pth')
        elif config["eval"] == 'final':
            model_file = os.path.join(config["model_dir"],
                                      f'final_fold{fold}.pth')
        else:
            raise ValueError(f'unknown eval mode: {config["eval"]}')

        model = ModelClass(enet_type=config["enet_type"],
                           out_dim=int(config["out_dim"]),
                           drop_nums=int(config["drop_nums"]),
                           metric_strategy=config["metric_strategy"])
        model = model.to(device)

        try:  # checkpoint saved from a single-GPU model
            model.load_state_dict(torch.load(model_file), strict=True)
        except RuntimeError:  # checkpoint saved via DataParallel; keys carry a 'module.' prefix
            state_dict = torch.load(model_file)
            state_dict = {
                k[7:] if k.startswith('module.') else k: v
                for k, v in state_dict.items()
            }
            model.load_state_dict(state_dict, strict=True)

        # Count listed devices, not characters: '12' names one GPU, not two.
        if len(os.environ.get('CUDA_VISIBLE_DEVICES', '').split(',')) > 1:
            model = torch.nn.DataParallel(model)

        model.eval()

        this_LOGITS, this_PROBS = val_epoch(model,
                                            valid_loader,
                                            mel_idx,
                                            get_output=True)
        LOGITS.append(this_LOGITS)
        PROBS.append(this_PROBS)
        dfs.append(df_valid)

    dfs = pd.concat(dfs).reset_index(drop=True)
    dfs['pred'] = np.concatenate(PROBS).squeeze()[:, mel_idx]

    auc_all_raw = roc_auc_score(dfs['target'] == mel_idx, dfs['pred'])

    dfs2 = dfs.copy()
    for i in range(args.n_splits):
        dfs2.loc[dfs2['fold'] == i, 'pred'] = dfs2.loc[dfs2['fold'] == i,
                                                       'pred'].rank(pct=True)
    auc_all_rank = roc_auc_score(dfs2['target'] == mel_idx, dfs2['pred'])

    content = f'Eval {config["eval"]}:\nauc_all_raw : {auc_all_raw:.5f}\nauc_all_rank : {auc_all_rank:.5f}\n'
    print(content)
    with open(os.path.join(config["log_dir"], 'log.txt'), 'a') as appender:
        appender.write(content + '\n')

    np.save(os.path.join(config["oof_dir"], f'{config["eval"]}_oof.npy'),
            dfs['pred'].values)
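
The per-fold rank(pct=True) step rescales each fold's predictions to uniform [0, 1] ranks before pooling, so folds whose models are calibrated differently can be compared on a common scale. A small self-contained illustration of the effect on synthetic data:

import numpy as np
import pandas as pd
from sklearn.metrics import roc_auc_score

rng = np.random.default_rng(0)
n = 400
df = pd.DataFrame({'fold': rng.integers(0, 2, n),
                   'target': rng.integers(0, 2, n)})
# Simulate fold 1 producing systematically larger raw scores than fold 0.
df['pred'] = df['target'] * 0.5 + rng.normal(0, 0.5, n) + df['fold'] * 3.0

print('raw AUC :', roc_auc_score(df['target'], df['pred']))   # dragged down by the fold offset
for i in range(2):
    mask = df['fold'] == i
    df.loc[mask, 'pred'] = df.loc[mask, 'pred'].rank(pct=True)
print('rank AUC:', roc_auc_score(df['target'], df['pred']))   # offset removed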
Code Example #7
 def forward(self, img_path):
     try:
         img = cv2.imread(img_path)
         _, transforms_val = get_transforms(config["image_size"])
         img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # OpenCV loads BGR; the model expects RGB
         res = transforms_val(image=img)
         img1 = res['image'].astype(np.float32)
         img1 = img1.transpose(2, 0, 1)  # HWC -> CHW
         inputs = np.expand_dims(img1, axis=0)  # add batch dimension
         inputs = np.array(inputs, copy=True, dtype=np.float16)  # engine runs in fp16
         inp_batch = inputs.shape[0]
         if inp_batch < self.inputs_shape[0]:
             outputs0, outputs1 = self.less_predict(inputs)
         elif inp_batch == self.inputs_shape[0]:
             print('batch size equal')
             outputs = self.engine.run([inputs])
             outputs0 = outputs[0]
             outputs1 = outputs[1]
         else:
             print('inputs batch greater than engine inputs')
             outputs0 = []
             outputs1 = []
             ixs = list(range(0, inp_batch,
                              self.inputs_shape[0])) + [inp_batch]
             # Walk the batch in engine-sized chunks, collecting both heads.
             for lo, hi in zip(ixs[:-1], ixs[1:]):
                 inp = inputs[lo:hi, :]
                 if inp.shape[0] == self.inputs_shape[0]:
                     outs = self.engine.run([inp])
                     outs0, outs1 = outs[0], outs[1]
                 else:
                     outs0, outs1 = self.less_predict(inp)
                 outputs0.append(outs0.copy())
                 outputs1.append(outs1.copy())
             outputs0 = np.vstack(outputs0)
             outputs1 = np.vstack(outputs1)
         outputs0 = torch.tensor(outputs0)
         outputs1 = torch.tensor(outputs1)
         print("outputs0:", outputs0)
         print("outputs1:", outputs1)
         probs_color = F.softmax(outputs0, dim=1)
         probs_color = probs_color.cpu().detach().numpy()
         outputs_color = probs_color.argmax(1)  # predicted color class per sample
         probs_color = [
             probs_color[i][outputs_color[i]]
             for i in range(len(outputs_color))
         ]
         probs_action = F.softmax(outputs1, dim=1)
         probs_action = probs_action.cpu().detach().numpy()
         outputs_action = probs_action.argmax(1)  # predicted action class per sample
         probs_action = [
             probs_action[i][outputs_action[i]]
             for i in range(len(outputs_action))
         ]
         return outputs_color, probs_color, outputs_action, probs_action
     except Exception:
         raise  # re-raise unchanged, preserving the original traceback
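
Code Example #7 is the two-output version of the same chunking pattern. Extending the hypothetical chunked_run sketch from after Code Example #5 to engines that return several arrays:

import numpy as np

def chunked_run_multi(inputs, engine_batch, run_full, run_partial, n_outputs=2):
    # Same chunk boundaries as chunked_run, but collect each of the
    # engine's n_outputs arrays into its own list before stacking.
    n = inputs.shape[0]
    bounds = list(range(0, n, engine_batch)) + [n]
    collected = [[] for _ in range(n_outputs)]
    for lo, hi in zip(bounds[:-1], bounds[1:]):
        chunk = inputs[lo:hi]
        outs = (run_full(chunk) if chunk.shape[0] == engine_batch
                else run_partial(chunk))
        for k in range(n_outputs):
            collected[k].append(outs[k])
    return [np.vstack(c) for c in collected]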