Example 1
def find_lr(dataloader_train, optimizer, net, device):
    print('Find Best lr...')
    clr = CLR.CLR(optimizer, len(dataloader_train))

    t = tqdm(dataloader_train, leave=False, total=len(dataloader_train))
    running_loss = 0.
    avg_beta = 0.98
    net.train()
    for i, (img, visit, out_gt) in enumerate(t):

        img = img.to(device)
        visit = visit.to(device)
        out_gt = out_gt.to(device)
        out, _ = net(img, visit)

        loss = loss_func(out, out_gt)  # loss_func is assumed to be defined in the enclosing scope

        # exponentially smoothed loss with bias correction (as in Adam)
        running_loss = avg_beta * running_loss + (1 - avg_beta) * loss.item()
        smoothed_loss = running_loss / (1 - avg_beta**(i + 1))
        t.set_postfix(loss=smoothed_loss)

        lr = clr.calc_lr(smoothed_loss)
        if lr == -1:
            break
        update_lr(optimizer, lr)

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    clr.plot()
    print('best lr: {:.6f}'.format(clr.lrs[np.argmin(np.array(clr.losses))]))
    g()  # save all temporary variables (boxx)
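
update_lr is referenced above but not defined in this snippet; a minimal sketch of what it presumably does (an assumption: apply the rate proposed by CLR to every parameter group):

def update_lr(optimizer, lr):
    # set the same learning rate on all of the optimizer's parameter groups
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr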
Example 2
def nanDete(t, globalg=False):
    nan = th.isnan(t).sum()
    if nan:
        if globalg:
            g(1)
        ar(t)
        raise LookupError('Tensor contains NaN')
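
The common thread of these examples is g() from the boxx library, which transports the calling function's local variables into the main (interactive) namespace so they can be inspected after the call returns. A minimal usage sketch, assuming boxx is installed:

from boxx import g

def compute():
    hidden = [1, 2, 3]
    total = sum(hidden)
    g()  # export `hidden` and `total` to the main namespace for debugging
    return total

compute()
# after the call, hidden and total are available at the top level for inspection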
Example 3
def predict(dataloader_test, device, *nets):
    """预测输出"""
    sm = nn.Softmax(dim=1)
    labs_out = []
    out_mat = []
    with torch.no_grad():
        for (img, visit, _) in tqdm(dataloader_test):
            if isinstance(img, list):
                img_tta = img
            else:
                img_tta = (img, )

            visit = visit.to(device)

            out_mat_h = []
            for cnt, net in enumerate(nets):
                net.eval()
                for i in range(len(img_tta)):
                    out_tta_tmp = net(img_tta[i].to(device), visit)

                    if isinstance(out_tta_tmp, tuple):
                        out_tta_tmp = out_tta_tmp[0]


                    # out_tta_tmp = out_tta_tmp + 2*torch.mul(out_tta_tmp, torch.le(out_tta_tmp, 0).float())

                    out_tta_tmp = sm(out_tta_tmp)

                    out_mat_h.append(out_tta_tmp.cpu().numpy())

                    if (i == 0):
                        out_tta = out_tta_tmp
                    else:
                        out_tta = out_tta + out_tta_tmp

                out_tmp = out_tta

                if (cnt == 0):
                    out = out_tmp
                else:
                    out = out + out_tmp

            _, preds = torch.max(out, 1)
            labs_out.append(preds.cpu().numpy().flatten().astype(np.uint8) + 1)
            out_mat.append(
                np.array(out_mat_h).transpose((1, 0, 2)).reshape(
                    (visit.shape[0], -1)))
    labs_out_np = np.concatenate(labs_out)
    fea = np.concatenate(out_mat, axis=0)
    g()
    return labs_out_np, fea
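
The nested loops above simply sum softmax outputs over the TTA variants and over the ensemble of nets before taking the argmax; since argmax is invariant to positive scaling, summing and averaging give the same prediction. An equivalent one-expression sketch over a hypothetical list of per-(net, TTA) outputs:

import torch

# outs: hypothetical list of softmax tensors, one per (net, tta) pair, each of shape [B, C]
outs = [torch.softmax(torch.randn(4, 9), dim=1) for _ in range(6)]
avg = torch.stack(outs).mean(dim=0)  # [B, C]; same argmax as the running sum above
preds = avg.argmax(dim=1)            # 0-based class indices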
Example 4
def test(cls):
    hdri_dir = "/tmp/hdri"
    hm = HdriManager(hdri_dir=hdri_dir,
                     category="indoor",
                     download=False,
                     debug=True)
    for i in range(10):
        hdri = hm.sample()
        print(hdri)
        assert hm.category in hdri
    boxx.g()
Example 5
File: try.py Project: DIYer22/zcs
def get_parser():
    parser = argparse.ArgumentParser(prog="PROG", allow_abbrev=True)
    parser.add_argument("a", type=float, default=None)
    ac = parser.add_argument("--foobar", action="store_true")
    ac = parser.add_argument("--c", type=eval, default={})
    parser._option_string_actions.pop("--c")  # private-API workaround so --c can be re-added below
    ac = parser.add_argument("--c", type=eval, default={})
    # ac = parser.add_argument('--d', default=None, choices=['rock', 'paper', 'scissors'])
    if len(sys.argv) >= 2:
        args = parser.parse_args()
    else:
        args = parser.parse_args(["1", "--foobar", "--c", "[1 , 4]"])

    pad = parser.__dict__
    g()
    print(args)
    return args
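
Popping parser._option_string_actions is a private-API workaround for re-registering --c. A sketch of the documented alternative, argparse's conflict_handler="resolve", which lets a later add_argument overwrite an earlier one:

import argparse

parser = argparse.ArgumentParser(conflict_handler="resolve")
parser.add_argument("--c", type=eval, default={})
parser.add_argument("--c", type=eval, default=[])  # silently replaces the first definition
print(parser.parse_args(["--c", "[1, 4]"]).c)      # -> [1, 4]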
Example 6
        def download(name):
            try:
                if self.debug:
                    print(name)
                prefix = f"{name}_{resolution}"
                paths = boxx.glob(os.path.join(hdri_dir, prefix + "*"))
                if len(paths):
                    return paths[0]
                url = f"https://hdrihaven.com/hdri/?h={name}"
                html = BeautifulSoup(
                    rq.get(url, timeout=5).text,
                    features="html.parser",
                )
                href = [
                    a["href"] for a in html.find_all("a")
                    if f"_{resolution}." in a.get("href")
                ][0]
                cats = [
                    a.text for a in html.find(
                        text="Categories:").parent.parent.find_all("a")
                ]
                tags = [
                    a.text for a in html.find(
                        text="Tags:").parent.parent.find_all("a")
                ]
                name = f"{prefix}.{'='.join(cats)}.{'='.join(tags)}.{href[-3:]}"

                path = pathjoin(hdri_dir, name)
                r = rq.get(href, timeout=5)
                assert r.status_code == 200
                os.makedirs(hdri_dir, exist_ok=True)
                with open(path, "wb") as f:
                    f.write(r.content)
                return path
            except Exception as e:
                if self.debug:
                    boxx.pred - name
                    boxx.g()
                raise e
Example 7
def forward(self, x):
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]

    # replicate the single-channel input into three normalized channels
    # (note the reversed channel order)
    x = torch.cat([
            (x - mean[2]) / std[2],
            (x - mean[1]) / std[1],
            (x - mean[0]) / std[0],
            ], 1)

    x = self.conv1(x)           # 64, 64, 64
    e2 = self.encoder2(x)       # 64, 64, 64
    e3 = self.encoder3(e2)      # 128, 32, 32
    e4 = self.encoder4(e3)      # 256, 16, 16
    e5 = self.encoder5(e4)      # 512, 8, 8

    f = self.center(e5)         # 256, 4, 4
    d5 = self.decoder5(f, e5)   # 64, 8, 8
    d4 = self.decoder4(d5, e4)  # 64, 16, 16
    d3 = self.decoder3(d4, e3)  # 64, 32, 32
    d2 = self.decoder2(d3, e2)  # 64, 64, 64
    d1 = self.decoder1(d2)      # 64, 128, 128

    # hypercolumn: concatenate all decoder stages at full resolution
    # (F.interpolate replaces the deprecated F.upsample)
    f = torch.cat((
            d1,
            F.interpolate(d2, scale_factor=2, mode='bilinear', align_corners=False),
            F.interpolate(d3, scale_factor=4, mode='bilinear', align_corners=False),
            F.interpolate(d4, scale_factor=8, mode='bilinear', align_corners=False),
            F.interpolate(d5, scale_factor=16, mode='bilinear', align_corners=False),
            ), 1)               # 320, 128, 128
    f = self.se_f(f)
    f = F.dropout2d(f, p=0.5, training=self.training)  # only drop during training
    out = self.outc(f)          # 1, 101, 101

    from boxx import g
    g()
    return out
Example 8
def main():
    parser = argparse.ArgumentParser(
        description="PyTorch Object Detection Webcam Demo")
    parser.add_argument(
        "--config-file",
        default="configs/osis/osis_R_50_FPN_1x.yaml",
        metavar="FILE",
        help="path to config file",
    )
    parser.add_argument(
        "--weights",
        default="models/distributed_test/model_final.pth",
        metavar="FILE",
        help="path to the trained model",
    )
    parser.add_argument(
        "--images-dir",
        default="demo/images",
        metavar="DIR",
        help="path to demo images directory",
    )
    parser.add_argument(
        "--min-image-size",
        type=int,
        default=800,
        help="Smallest size of the image to feed to the model. "
        "Model was trained with 800, which gives best results",
    )
    parser.add_argument(
        "opts",
        help="Modify model config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )

    args = parser.parse_args()

    # load config from file and command-line arguments
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.MODEL.WEIGHT = args.weights

    cfg.freeze()

    # The following per-class thresholds are computed by maximizing
    # per-class f-measure in their precision-recall curve.
    # Please see compute_thresholds_for_classes() in coco_eval.py for details;
    # a standalone sketch of this computation follows the example.
    thresholds_for_classes = [
        0.49211737513542175, 0.49340692162513733, 0.510103702545166,
        0.4707475006580353, 0.5197340250015259, 0.5007652044296265,
        0.5611110329627991, 0.4639902412891388, 0.4778415560722351,
        0.43332818150520325, 0.6180170178413391, 0.5248752236366272,
        0.5437473654747009, 0.5153843760490417, 0.4194680452346802,
        0.5640717148780823, 0.5087228417396545, 0.5021755695343018,
        0.5307778716087341, 0.4920770823955536, 0.5202335119247437,
        0.5715234279632568, 0.5089765191078186, 0.5422378778457642,
        0.45138806104660034, 0.49631351232528687, 0.4388565421104431,
        0.47193753719329834, 0.47037890553474426, 0.4791252017021179,
        0.45699411630630493, 0.48658522963523865, 0.4580649137496948,
        0.4603237509727478, 0.5243804454803467, 0.5235602855682373,
        0.48501554131507874, 0.5173789858818054, 0.4978085160255432,
        0.4626562297344208, 0.48144686222076416, 0.4889853894710541,
        0.4749937951564789, 0.42273756861686707, 0.47836390137672424,
        0.48752328753471375, 0.44069987535476685, 0.4241463541984558,
        0.5228247046470642, 0.4834112524986267, 0.4538525640964508,
        0.4730372428894043, 0.471712201833725, 0.5180512070655823,
        0.4671719968318939, 0.46602892875671387, 0.47536996006965637,
        0.487352192401886, 0.4771934747695923, 0.45533207058906555,
        0.43941256403923035, 0.5910647511482239, 0.554875910282135,
        0.49752360582351685, 0.6263655424118042, 0.4964958727359772,
        0.5542593002319336, 0.5049241185188293, 0.5306999087333679,
        0.5279538035392761, 0.5708096623420715, 0.524990975856781,
        0.5187852382659912, 0.41242220997810364, 0.5409807562828064,
        0.48504579067230225, 0.47305455803871155, 0.4814004898071289,
        0.42680642008781433, 0.4143834114074707
    ]

    demo_im_names = os.listdir(args.images_dir)

    # prepare object that handles inference plus adds predictions on top of image
    coco_demo = COCODemo(
        cfg,
        confidence_thresholds_for_classes=thresholds_for_classes,
        min_image_size=args.min_image_size)

    for im_name in demo_im_names[:1]:
        img = cv2.imread(os.path.join(args.images_dir, im_name))
        if img is None:
            continue
        start_time = time.time()

        semantics, instances = coco_demo.my_run(img)
        show(img[:, :, ::-1], figsize=(10, 10))

    print("Press any keys to exit ...")

    g()
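
Per the comment above, each threshold is the point that maximizes the per-class f-measure on the precision-recall curve. A standalone sketch of that computation with scikit-learn (hypothetical scores and labels; not the project's compute_thresholds_for_classes):

import numpy as np
from sklearn.metrics import precision_recall_curve

scores = np.random.rand(100)           # hypothetical detection confidences
labels = np.random.randint(0, 2, 100)  # hypothetical binary ground truth
precision, recall, thresholds = precision_recall_curve(labels, scores)
f1 = 2 * precision * recall / np.maximum(precision + recall, 1e-12)
best = thresholds[np.argmax(f1[:-1])]  # threshold with maximal f-measure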
Example 9
def eval_net(loss_func, dataloader_val, device, *nets, judge_res=False):
    """用验证集评判网络性能"""
    sm = nn.Softmax(dim=1)
    acc_temp = Record()
    loss_temp = Record()
    labs_ori, labs_out = [], []
    out_mat = []
    save_path = r'E:\pic\URFC-baidu-2\eval-res'
    save_cnt = 0
    with torch.no_grad():
        for (img, visit, out_gt) in tqdm(dataloader_val):
            if isinstance(img, list):
                img_tta = img
            else:
                img_tta = (img, )

            # vertical flip (disabled)
            # visit = visit.numpy()
            # visit = visit[:,:,::-1].copy()
            # visit = torch.as_tensor(visit, dtype=torch.float32)

            visit = visit.to(device)
            out_gt = out_gt.to(device)

            out_mat_h = []
            for cnt, net in enumerate(nets):
                net.eval()

                for i in range(len(img_tta)):
                    out_tta_tmp = net(img_tta[i].to(device), visit)

                    if isinstance(out_tta_tmp, tuple):
                        out_tta_tmp = out_tta_tmp[0]

                    out_tta_tmp = sm(out_tta_tmp)

                    # voting scheme (disabled): assign 1 to each row's max, 0 elsewhere
                    # mat_tmp = torch.max(out_tta_tmp, 1)[0].repeat(out_tta_tmp.shape[1],1).transpose(1,0)
                    # out_tta_tmp = (mat_tmp == out_tta_tmp).float()

                    out_mat_h.append(out_tta_tmp.cpu().numpy())

                    if (i == 0):
                        out_tta = out_tta_tmp
                    else:
                        out_tta = out_tta + out_tta_tmp

                out_tmp = out_tta

                if (cnt == 0):
                    out = out_tmp
                else:
                    out = out + out_tmp

            loss = loss_func(out, out_gt)
            _, preds = torch.max(out, 1)

            # save results for inspection
            if judge_res:
                for j in range(len(out_gt)):
                    if preds[j] == out_gt.data[j]:
                        torchvision.utils.save_image(
                            img_tta[0][j, :],
                            join(save_path, 'T', r'{}.jpg'.format(save_cnt)))
                    else:
                        torchvision.utils.save_image(
                            img_tta[0][j, :],
                            join(save_path, 'F', r'{}.jpg'.format(save_cnt)))
                    save_cnt += 1

            loss_temp.update(loss.item(), img_tta[0].shape[0])
            acc_temp.update(
                (float(torch.sum(preds == out_gt.data)) / len(out_gt)),
                len(out_gt))
            labs_ori.append(out_gt.cpu().numpy())
            labs_out.append(preds.cpu().numpy().flatten().astype(np.uint8))
            out_mat.append(
                np.array(out_mat_h).transpose((1, 0, 2)).reshape(
                    (visit.shape[0], -1)))
    labs_ori_np = np.concatenate(labs_ori)
    labs_out_np = np.concatenate(labs_out)
    fea = np.concatenate(out_mat, axis=0)
    g()
    return loss_temp.avg, acc_temp.avg, labs_ori_np, labs_out_np, fea
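
Record is not defined in this snippet; judging from the update(value, n) / .avg usage above, it is a running-average tracker. A minimal sketch under that assumption:

class Record:
    """Running-average tracker (sketch; mirrors the update(val, n) / .avg usage above)."""

    def __init__(self):
        self.sum = 0.0
        self.count = 0

    def update(self, val, n=1):
        self.sum += val * n
        self.count += n

    @property
    def avg(self):
        return self.sum / max(self.count, 1)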