Example #1
    def update(self, x, y):
        # Forward pass: recover a cleaned image from the degraded input
        cleaned = self.cleaner(x)

        # Combined L1 & SSIM loss against the target; avg_meters
        # presumably tracks running loss statistics
        loss = get_default_loss(cleaned, y, self.avg_meters)

        # Standard generator update step
        self.g_optimizer.zero_grad()
        loss.backward()
        self.g_optimizer.step()

        return {'recovered': cleaned}
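The comment above suggests get_default_loss combines L1 and SSIM terms for this image-restoration model. A minimal sketch of such a combined loss (an assumption, not the source implementation), using the third-party pytorch_msssim package:

import torch.nn.functional as F
from pytorch_msssim import ssim  # pip install pytorch-msssim

def l1_ssim_loss(pred, target, alpha=0.85):
    # Hypothetical weighting; alpha and data_range are assumptions
    l1 = F.l1_loss(pred, target)
    ssim_term = 1.0 - ssim(pred, target, data_range=1.0)
    return (1.0 - alpha) * l1 + alpha * ssim_term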
Example #2
    def update(self, sample):
        # Move ground-truth labels to the configured device
        y = sample['label'].to(opt.device)

        # Forward pass through the model
        output = self.forward(sample)

        # Default loss; avg_meters presumably tracks running statistics
        loss = get_default_loss(output, y, self.avg_meters)

        # Standard optimization step
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

        return {'output': output}
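Both update() hooks above perform one gradient step per call and return the model outputs. A hypothetical driver loop (model, dataloader, and opt.epochs are assumed names, not from the source):

for epoch in range(opt.epochs):
    for sample in dataloader:
        out = model.update(sample)  # one optimization step per batch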
Example #3
from functools import partial

import numpy as np
import torch
from yacs.config import CfgNode as CN  # assumed origin of the CN alias

def learner_init(uid: str, cfg: CN) -> Learner:
    device = torch.device('cuda')
    data = get_data(cfg)

    # Ugly hack because I wanted ratios, scales in fractional formats:
    # string configs (e.g. '[1/2, 1, 2]') are eval'd into lists
    if not isinstance(cfg['ratios'], list):
        ratios = eval(cfg['ratios'], {})
    else:
        ratios = cfg['ratios']
    if not isinstance(cfg['scales'], list):
        scales = cfg['scale_factor'] * np.array(eval(cfg['scales'], {}))
    else:
        scales = cfg['scale_factor'] * np.array(cfg['scales'])

    # One anchor per (ratio, scale) combination
    num_anchors = len(ratios) * len(scales)

    mdl = get_default_net(num_anchors=num_anchors, cfg=cfg)
    mdl.to(device)
    if cfg.do_dist:
        # Multi-process distributed training, one process per GPU
        mdl = torch.nn.parallel.DistributedDataParallel(
            mdl,
            device_ids=[cfg.local_rank],
            output_device=cfg.local_rank,
            broadcast_buffers=True,
            find_unused_parameters=True)
    elif not cfg.do_dist and cfg.num_gpus:
        # Single-process data parallelism
        mdl = torch.nn.DataParallel(mdl)

    loss_fn = get_default_loss(ratios, scales, cfg)
    loss_fn.to(device)

    eval_fn = get_default_eval(ratios, scales, cfg)
    # eval_fn.to(device)
    opt_fn = partial(torch.optim.Adam, betas=(0.9, 0.99))

    learn = Learner(uid=uid,
                    data=data,
                    mdl=mdl,
                    loss_fn=loss_fn,
                    opt_fn=opt_fn,
                    eval_fn=eval_fn,
                    device=device,
                    cfg=cfg)
    return learn
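Example #3 picks DistributedDataParallel when cfg.do_dist is set, falling back to DataParallel for single-process multi-GPU runs. A minimal sketch of the process-group setup the DDP branch relies on (assuming a torchrun-style launcher that sets the rank environment variables; not from the source):

import torch
import torch.distributed as dist

dist.init_process_group(backend='nccl')  # one process per GPU
torch.cuda.set_device(cfg.local_rank)    # bind this process to its GPU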
Example #4

def learner_init(uid, cfg):
    device = torch.device('cuda')

    # Same fractional-format hack as in Example #3
    if not isinstance(cfg['ratios'], list):
        ratios = eval(cfg['ratios'], {})
    else:
        ratios = cfg['ratios']
    if not isinstance(cfg['scales'], list):
        scales = cfg['scale_factor'] * np.array(eval(cfg['scales'], {}))
    else:
        scales = cfg['scale_factor'] * np.array(cfg['scales'])

    num_anchors = len(ratios) * len(scales)

    qnet = get_default_net(num_anchors=num_anchors, cfg=cfg)
    qnet = qnet.to(device)
    qnet = torch.nn.DataParallel(qnet)

    qlos = get_default_loss(ratios, scales, cfg)
    qlos = qlos.to(device)
    qeval = Evaluator(ratios, scales, cfg)
    db = get_data(cfg)
    opt_fn = partial(torch.optim.Adam, betas=(0.9, 0.99))

    # Note: currently using the default optimizer
    learn = Learner(uid=uid,
                    data=db,
                    mdl=qnet,
                    loss_fn=qlos,
                    opt_fn=opt_fn,
                    eval_fn=qeval,
                    device=device,
                    cfg=cfg)
    return learn
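Examples #3 and #4 eval config strings so values like '[1/2, 1, 2]' can carry fractions. A safer sketch that avoids eval (the exact string format is an assumption):

from fractions import Fraction

def parse_fractional_list(s):
    # Parse a string such as '[1/2, 1, 2]' into floats without eval;
    # assumes comma-separated entries inside brackets.
    return [float(Fraction(tok)) for tok in s.strip('[]').split(',')]

# parse_fractional_list('[1/2, 1, 2]') -> [0.5, 1.0, 2.0]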