Example #1
def modelCombine(models, f, conf=basicConf, arg=None):
    bank = bankManage.Bank()
    curve = loss.Loss(conf.StartValue)
    sizes = []
    for model, weit in models:
        sizes.extend(model.conf.getSizes())
    phase = wave.Phase(sizes)
    tt = 0
    vs = []
    N = 5000
    restBank = 0  # remaining-balance window
    bw1 = 0.6
    bw2 = 0.4
    lastLen = 0
    for line in f:
        vs.append(line)
        if len(vs) >= N:
            phase1 = copy.deepcopy(phase)
            curve1 = loss.Loss(curve.now)
            bank1 = copy.deepcopy(bank)
            tt1 = tt
            (phase, curve, bank, tt,
             restBank) = modelCombinePhase(models, vs[lastLen:], phase, curve,
                                           bank, tt, False, restBank, conf,
                                           arg)
            #models=selectModelCombine(models,vs,phase1,curve1,bank1,tt1,restBank,conf,arg,N)
            #print "model",models[0][1],models[1][1]
            #del vs[0:30000]
            vs = []
            lastLen = len(vs)
    if len(vs) > 0:
        (phase, curve, bank, tt,
         restBank) = modelCombinePhase(models, vs[lastLen:], phase, curve,
                                       bank, tt, True, restBank, conf, arg)
    print(curve)
Example #2
def main():
    global model
    if checkpoint.ok:
        loader = data.Data(args)
        _model = model.Model(args, checkpoint)

        _loss = []
        if not args.test_only:
            ## only for optical flow
            if int(args.model_label) == 0:
                _loss.append(
                    loss.Loss(args,
                              checkpoint,
                              ls=args.loss_flow,
                              task='optical-flow'))
                # if args.loss_freg is not None:
                #     _loss.append(loss.Loss(args, checkpoint, ls = args.loss_freg))
                t = FlowTrainer(args, loader, _model, _loss, checkpoint)
            ## only for frame-recurrent
            elif int(args.model_label) == 1:
                _loss.append(
                    loss.Loss(args,
                              checkpoint,
                              ls=args.loss_denoise,
                              task='denoise'))
                t = FRTrainer(args, loader, _model, _loss, checkpoint)
            ## for frame-recurrent with optical-flow
            elif int(args.model_label) == 2:
                _loss.append(
                    loss.Loss(args,
                              checkpoint,
                              ls=args.loss_denoise,
                              task='denoise'))
                _loss.append(
                    loss.Loss(args,
                              checkpoint,
                              ls=args.loss_flow,
                              task='optical-flow'))
                t = Trainer(args, loader, _model, _loss, checkpoint)
            else:
                raise ValueError("args.model_label can only be 0, 1, or 2")

        else:
            _loss = None
        #exit(0)

        if not args.test_only:
            while not t.terminate():
                t.train()
                t.test()
        else:
            if int(args.model_label) == 0:
                t = FlowTester(args, loader, _model, checkpoint)
                t.test()
            else:
                t = Tester(args, loader, _model, checkpoint)
                t.test()

        checkpoint.done()
Example #3
def main():
    # args.model = 'EDSR'
    # args.n_resblocks = 32
    # args.n_feats = 256
    # args.res_scale = 0.1

    # args.save = 'EDSR_poster'

    global model
    if args.data_test == ['video']:
        from videotester import VideoTester
        model = model.Model(args, checkpoint)
        t = VideoTester(args, model, checkpoint)
        t.test()
    else:
        if checkpoint.ok:
            loader = data.Data(args)
            _model = model.Model(args, checkpoint)
            _loss = loss.Loss(args, checkpoint) if not args.test_only else None
            t = Trainer(args, loader, _model, _loss, checkpoint)
            while not t.terminate():
                t.train()
                t.test()

            checkpoint.done()
Example #4
def main(): 
    global model
    if args.data_test == ['video']:
        from videotester import VideoTester
        model = model.Model(args, checkpoint)
        t = VideoTester(args, model, checkpoint)
        t.test()
    else:
        if checkpoint.ok:
            
            #args.model_init='kcsres'
            #args.test_only = True 
            #args.resume = -2 
            #args.batch_size = 16 
            #args.is_fcSim = False 

            loader = data.Data(args)
            _model = model.Model(args, checkpoint)
            _loss = loss.Loss(args, checkpoint) if not args.test_only else None
            t = Trainer(args, loader, _model, _loss, checkpoint)
            while not t.terminate():
                t.train()
                t.test()

            checkpoint.done()
Example #5
def main():
    global model
    global loss
    if args.data_test == 'video':
        from videotester import VideoTester
        model = model.Model(args, checkpoint)
        t = VideoTester(args, model, checkpoint)
        t.test()
    else:
        if checkpoint.ok:
            loader = data.Data(args)
            share_model = model.Model(args, checkpoint)
            controller_model = model.Controller_Model(share_model, args)
            loss = loss.Loss(args, checkpoint) if not args.test_only else None
            t = Trainer(args, loader, share_model, loss, checkpoint)
            controller_t = Controller_Trainer(args, loader, controller_model,
                                              loss, checkpoint)
            cur_epoch = 0
            while not t.terminate():
                epoch_time = time.time()
                t.train()
                if cur_epoch >= args.controller_start_training:
                    controller_t.controller_train()
                t.test()
                cur_epoch += 1
                epoch_time = time.time() - epoch_time
                print(
                    'epoch time: {} h {} min '.format(
                        int(epoch_time / 3600), int(epoch_time / 60 % 60)),
                    ' total time: {} h {} min'.format(
                        int(epoch_time * args.epochs / 3600),
                        int(epoch_time * args.epochs / 60 % 60)))
            checkpoint.done()
Example #6
    def build(self):

        # MODEL
        model_name = self.cfg.model_name
        if model_name not in dir(model):
            model_name = "PDANet"
        self.model = getattr(model, model_name)(cfg=self.cfg).to(self.device)
        self.model = nn.DataParallel(self.model)
        if self.cfg.net_verbose:
            summary(self.model, (4, 224, 224))

        print('-' * 40)
        print(f"[*] Model: {self.cfg.model_name}")
        print(f"[*] Device: {self.device}")
        print(f"[*] Path: {self.cfg.data_dir}")

        # OPTIMIZER
        self.optimizer = torch.optim.Adam(self.model.parameters(),
                                          lr=self.cfg.lr_init)

        # LOSS
        self.criterion = loss.Loss(self.cfg, self.device)

        # SCHEDULER
        if self.cfg.scheduler == "step":
            self.lr_sch = lr_scheduler.StepLR(self.optimizer, **self.steplr)
        else:
            self.lr_sch = lr_scheduler.ReduceLROnPlateau(
                self.optimizer, **self.plateau)

        # INITIALIZE: dirs, ckpts, data loaders
        self._make_dir()
        self._load_ckpt()
        self._load_data()
        self.writer = SummaryWriter(log_dir=self.cfg.logs_path)
Example #7
def predict(images="", root_path="", ai_directory_path="", model_type="EDSR"):
    """
    :param images: image name(s); named `images` because the original project received multiple image files
    :param root_path: directory where the images are stored
    :param ai_directory_path: directory where the model is stored
    :param model_type:
    :return: path + name of each generated image file (list)
    """
    if model_type == "EDSR":
        png_alpha_channel_remove(images, root_path)
        set_setting_value_edsr(images, root_path, ai_directory_path, use_cpu=False)
        torch.manual_seed(args.seed)
        checkpoint = utility.checkpoint(args)
        if checkpoint.ok:
            loader = data.Data(args)
            _model = model.Model(args, checkpoint)
            _loss = loss.Loss(args, checkpoint) if not args.test_only else None
            t = Trainer(args, loader, _model, _loss, checkpoint)
            result = t.test()  # return value is saved image path(and image name). list type.
            checkpoint.done()

            # A loop of the form `for file_name in result` cannot modify the str elements of result, so index instead.
            for i in range(len(result)):
                result[i] = result[i][result[i].rfind("\\") + 1:]
            return result  # return the names of the result files located inside the `media` directory
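
A hedged usage sketch for the predict() helper above. The file name and directory paths are hypothetical, and whether `images` should be a single name or a collection depends on set_setting_value_edsr, which is not shown here.

# Hypothetical call: upscale one PNG stored under ./media with the EDSR weights
# bundled in ./ai_models; predict() returns the bare output file names as a list.
result_names = predict(
    images="photo_01.png",            # hypothetical input file name
    root_path="./media",              # hypothetical image directory
    ai_directory_path="./ai_models",  # hypothetical model directory
    model_type="EDSR",
)
print(result_names)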
Example #8
def main():
    global model
    global loss
    if args.data_test == 'video':
        from videotester import VideoTester
        model = model.Model(args, checkpoint)
        t = VideoTester(args, model, checkpoint)
        t.test()
    else:
        if checkpoint.ok:
            loader = data.Data(args)
            share_model = model.Model(args, checkpoint)
            loss = loss.Loss(args, checkpoint) if not args.test_only else None
            t = Trainer(args, loader, share_model, loss, checkpoint)
            while not t.terminate():
                epoch_time = time.time()
                t.train()
                t.final_test()
                epoch_time = time.time() - epoch_time
                print(
                    'epoch time: {} h {} min '.format(
                        int(epoch_time / 3600), int(epoch_time / 60 % 60)),
                    ' total time: {} h {} min'.format(
                        int(epoch_time * args.epochs / 3600),
                        int(epoch_time * args.epochs / 60 % 60)))

            checkpoint.done()
Example #9
def t8(tag="超出值", Rate=0.0):
    import loss
    import random
    print(tag)
    curve = loss.Loss(1.0)
    conf = Conf()
    conf.HandSize = 1.0
    for line in sys.stdin:
        p = line.find(tag)
        if p >= 0:
            p1 = line.rfind("\t")
            try:
                v = float(line[p1 + 1:-1])
            except:
                continue
            #if random.random() < Rate:
            #	continue
            if abs(v) < 1e-6:
                continue
            print(v)
            #if v > 0.3:
            #	print "line:",line
            #v=0.01*v
            #if curve.now < 0:
            #	continue
            curve.addTime()
            e = curve.now * v
            curve.addE(e)
            curve.addWin(v, conf)
    conf.YearFold = 1.0
    print(curve.confStr(conf))
Example #10
    def __init__(self, sess, summaries_dir, batch_size, input_z_size, emb_size,
                 class_num, code_dim, img_size):
        self.sess = sess
        # Function
        net = nets.Nets(input_z_size, emb_size, class_num, code_dim, img_size)
        self.generator = net.generator
        self.discriminator = net.discriminator

        los = loss.Loss(batch_size)
        self.class_loss = los.class_loss
        self.class_loss2 = los.class_loss2
        self.vae_loss = los.vae_loss
        self.percept_loss = los.percept_loss
        self.tv_loss = los.tv_loss

        self.opers = ops.Ops()

        self.batch_size = batch_size
        self.input_z_size = input_z_size
        self.class_num = class_num
        self.code_dim = code_dim
        self.img_size = img_size
        self.emb_size = emb_size
        self.gen_gan_weight = 1
        self.gen_dis_weight = 1
        self.dis_gan_weight = 1
        self.dis_dis_weight = 1
        self.summaries_dir = summaries_dir
        self.build_model()
Example #11
def selectModelCombine(models, vs, phase, curve, bank, tt, restBank, conf, arg,
                       N):
    if len(models) == 2:
        MR = N / (100000.0 + N)
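        # MR: blending factor applied further down to mix the newly selected weights
        # with the previous ones kept in m2[-1] (larger N -> trust the new weights more)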
        bw1 = models[0][1]
        bw2 = models[1][1]
        m2[-1][0] = bw1
        m2[-1][1] = bw2
        bmax = -1000000
        #(curve.now-baseRate)/(curve.maxTrack+baseRate*0.1)
        for w1, w2, wphase in m2:
            models[0][1] = w1
            models[1][1] = w2
            phase1 = copy.deepcopy(phase)
            curve1 = loss.Loss(curve.now)
            bank1 = copy.deepcopy(bank)
            tt1 = tt
            restBank1 = restBank
            modelCombinePhase(models, vs, phase1, curve1, bank1, tt1, False,
                              restBank1, conf, arg)
            wphase.add(curve1.now / curve1.start - 1.0)
            bnow = wphase.avg(WK) - wphase.dev(WK)
            print("w1", w1, "w2", w2, "avg", wphase.avg(WK),
                  "dev", wphase.dev(WK), "bnow", bnow, "bmax", bmax,
                  "rate", curve1.now / curve1.start - 1.0,
                  "track", curve1.maxTrack)
            if bnow > bmax:
                print("w1", w1, "w2", w2, "bnow", bnow, "bmax", bmax)
                bw1 = w1
                bw2 = w2
                bmax = bnow
        models[0][1] = bw1 * MR + m2[-1][0] * (1.0 - MR)
        models[1][1] = bw2 * MR + m2[-1][1] * (1.0 - MR)
        print("bw12", bw1, bw2)
        print("best models:", models[0][1], models[1][1])
    return models
Example #12
def t4():
    v = 1.0
    curve = loss.Loss(1.0)
    for line in sys.stdin:
        curve.addTime(5500)
        curve.addE(float(line.strip()) * curve.now)
        print(curve.now)
    print(curve)
Example #13
def set_up_experiment(params, experiment, resume=None):

    # Create experiment directory
    if resume:
        params["experiment_dir"] = os.path.split(resume)[
            0]  # Use existing folder
    else:
        params["experiment_dir"] = utils.make_dir_with_date(
            params["experiment_dir"], "fastdepth")  # New folder

    ## --------------- Model --------------- ##
    model, optimizer_state_dict = utils.load_model(params, resume)

    # Configure GPU
    params["gpus"] = params["device"]
    if isinstance(params["device"], int) and torch.cuda.is_available():
        params["device"] = torch.device("cuda:{}".format(params["device"]))
    elif isinstance(params["device"], list) and torch.cuda.is_available():
        model = nn.DataParallel(model, device_ids=params["device"])
        params["device"] = torch.device("cuda:{}".format(params["device"][0]))
    else:
        params["device"] = "cpu"

    # Send model to GPU(s)
    # This must be done before optimizer is created
    # if a model state_dict is being loaded
    model.to(params["device"])

    ## --------------- Loss --------------- ##
    criterion = loss.Loss(params["loss"])

    ## --------------- Optimizer --------------- ##
    optimizer = optimize.get_optimizer(model, params)

    if optimizer_state_dict:
        optimizer.load_state_dict(optimizer_state_dict)

    # Load optimizer tensors onto GPU if necessary
    utils.optimizer_to_gpu(optimizer)

    ## --------------- LR Scheduler --------------- ##
    if resume:
        scheduler = optim.lr_scheduler.StepLR(
            optimizer,
            step_size=params["lr_epoch_step_size"],
            gamma=0.1,
            last_epoch=params["start_epoch"])
    else:
        scheduler = optim.lr_scheduler.StepLR(
            optimizer, step_size=params["lr_epoch_step_size"], gamma=0.1)

    print_params(params)
    log_comet_parameters(experiment, params)

    ## --------------- Dataset --------------- ##
    train_loader, val_loader, test_loader = load_dataset(params)

    return params, train_loader, val_loader, test_loader, model, criterion, optimizer, scheduler
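
For reference, a minimal sketch of how the tuple returned by set_up_experiment() might be consumed. The single training epoch, the (inputs, targets) batch layout, and the pre-existing `params`/`experiment` objects are assumptions, not part of the original code.

# Hypothetical driver: unpack everything set_up_experiment() returns and run one epoch.
(params, train_loader, val_loader, test_loader,
 model, criterion, optimizer, scheduler) = set_up_experiment(params, experiment)

model.train()
for inputs, targets in train_loader:  # assumed (input, target) batch layout
    inputs = inputs.to(params["device"])
    targets = targets.to(params["device"])
    optimizer.zero_grad()
    batch_loss = criterion(model(inputs), targets)  # criterion is loss.Loss(params["loss"])
    batch_loss.backward()
    optimizer.step()
scheduler.step()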
Example #14
def main():
    global model
    if checkpoint.ok:
        loader = data.Data(args)
        model = model.Model(args, checkpoint)
        loss = loss.Loss(args, checkpoint) if not args.test_only else None
        t = Trainer(args, loader, model, loss, checkpoint)

        t.test()
        checkpoint.done()
Example #15
def inference(*targs, **kwargs):
    dev = kwargs['device']
    if dev == 'cpu':
        print("device = ", dev)
        args.cpu = True
        net = model.Model(args, checkpoint)
        loss = loss.Loss(args, checkpoint) if not args.test_only else None
        t = Trainer(args, loader, net, loss, checkpoint)
        psnr = t.test()
        print("psnr = ", psnr)
    elif dev == 'cuda':
        print("device = ", dev)
        args.cpu = False
        net = model.Model(args, checkpoint)
        loss = loss.Loss(args, checkpoint) if not args.test_only else None
        t = Trainer(args, loader, net, loss, checkpoint)
        psnr = t.test()
        print("psnr = ", psnr)
    return psnr
Example #16
def main():
    global model
    if checkpoint.ok:
        loader = data.Data(args)
        _model = model.Model(args, checkpoint)
        _loss = loss.Loss(args, checkpoint) if not args.test_only else None
        t = Trainer(args, loader, _model, _loss, checkpoint)
        while not t.terminate():
            t.train()
            t.test()

        checkpoint.done()
Example #17
def main():

    timer_HvdInit = utility.timer()
    # initialize horovod.torch
    hvd.init()

    # cuda device flag
    args.cuda = args.hvd and torch.cuda.is_available()
    print("args.cuda: " + str(args.cuda))
    print("args.hvd: " + str(args.hvd))
    print("cuda available: " + str(torch.cuda.is_available()))
    # pinging local GPU to process
    if args.cuda:
        torch.cuda.set_device(hvd.local_rank())
        print("hvd local rank:" + str(hvd.local_rank()))
        torch.cuda.manual_seed(args.seed)
    print("Horovod init time elapsed: " + str(timer_HvdInit.toc()))
    cudnn.benchmark = True

    global model
    if args.data_test == ['video']:
        from videotester import VideoTester
        model = model.Model(args, checkpoint)
        t = VideoTester(args, model, checkpoint)
        t.test()
    else:
        if checkpoint.ok:
            # added support for distributed training dataset loading
            # param added: hvd
            loader = data.Data(args)
            _model = model.Model(args, checkpoint)
            _loss = loss.Loss(args, checkpoint) if not args.test_only else None

            # wrapping optimizer with horovod distributed support
            # param added: hvd
            t = Trainer(args, loader, _model, _loss, checkpoint)
            timer_HvdBcast1 = utility.timer()
            hvd.broadcast_parameters(_model.state_dict(), root_rank=0)
            print("Hvd Bcast params time elapsed: " +
                  str(timer_HvdBcast1.toc()))
            timer_HvdBcast2 = utility.timer()
            hvd.broadcast_optimizer_state(t.optimizer, root_rank=0)
            print("Hvd Bcast optimizer state time elapsed: " +
                  str(timer_HvdBcast2.toc()))
            # Broadcast the initial variable states from rank 0 to all other processes
            while not t.terminate():
                timer_TrainLoop = utility.timer()
                t.train()
                print("Single loop time elapsed: " +
                      str(timer_TrainLoop.toc()))

            checkpoint.done()
Example #18
def create_model(opt):
    if opt.model == 'LRCN':
        model = models.LRCNModel(opt)
    # elif opt.model == 'Simple':
    #     model = models.SimpleModel(opt)
    elif opt.model == 'ConvLSTM':
        model = models.ConvLSTM(opt)
    else:
        raise ValueError('Unrecognized opt.mode={}'.format(opt.model))
    criterion = loss.Loss(opt)
    if opt.gpu_ids:
        model.cuda()
        torch.backends.cudnn.benchmark = True
    return model, criterion
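
A small usage sketch for create_model(); the `opt` object and its fields are assumptions, and models.LRCNModel plus loss.Loss will typically read further options not shown here.

# Hypothetical: build an LRCN model and its criterion from a parsed options object.
from argparse import Namespace

opt = Namespace(model='LRCN', gpu_ids=[0])  # only the fields this wrapper itself checks
net, criterion = create_model(opt)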
Example #19
def main():
    global model
    if args.data_test == ['video']:
        from videotester import VideoTester
        model = model.Model(args, checkpoint)
        t = VideoTester(args, model, checkpoint)
        t.test()
    else:
        if checkpoint.ok:
            loader = data.Data(args)
            # print(len(loader.loader_train))
            # for d in loader.loader_train:
            #     print(d[0].shape)
            #     print(d[1].shape)
            #     print(d[2])
            #     print(d[3])
            #
            #     break;
            #exit(0)
            _model = model.Model(args, checkpoint)
            if args.model == "SSL":
                if not args.test_only:
                    _loss = [loss.Loss(args, checkpoint), loss.Loss(args, checkpoint, ls=args.loss_ssl)]
                    ## Relative Loss
                    if args.loss_rel is not None:
                        _loss.append(loss.Loss(args, checkpoint, ls=args.loss_rel))
                else:
                    _loss = None
                t = SSL_Trainer(args, loader, _model, _loss, checkpoint)
            else:
                _loss = loss.Loss(args, checkpoint) if not args.test_only else None
                t = Trainer(args, loader, _model, _loss, checkpoint)
            while not t.terminate():
                t.train()
                t.test()

            checkpoint.done()
Example #20
def main():
    global model
    global loss
    if args.data_test == 'video':
        from videotester import VideoTester
        model = model.Model(args, checkpoint)
        t = VideoTester(args, model, checkpoint)
        t.test()
    else:
        if checkpoint.ok:
            loader = data.Data(args)
            share_model = model.Model(args, checkpoint)
            loss = loss.Loss(args, checkpoint) if not args.test_only else None
            t = Trainer(args, loader, share_model, loss, checkpoint)
            t.derive()
            checkpoint.done()
Example #21
def main(checkpoint):
    if checkpoint.ok:
        loader = data.Data(args)
        _model = model.Model(args, checkpoint)
        _loss = loss.Loss(args, checkpoint) if not args.test_only else None
        t = Trainer(args, loader, _model, _loss, checkpoint)

        if args.feature_map:
            t.get_feature_maps()
            return

        while not t.terminate():
            t.train()
            t.test()

        checkpoint.done()
Example #22
def main():
    
    global model

    if args.data_test == ['video']:
        from videotester import VideoTester
        model = model.Model(args, checkpoint)
        t = VideoTester(args, model, checkpoint)
        t.test()
    else:
        if checkpoint.ok:
            
            # Load Data --> Go to data.Data
            # Class Data defined in /src/loader --> how do we know it is calling that?
                # It calls using import data --> data is a package now due to __init__.py 
                # Any .py file can be imported 
            # Why is Data class defined in .py file?
            
            # Defining a lot of attributes of loader class, including where to load test images
            # But we have not actually loaded data --> only in t.test()
            loader = data.Data(args)
        
            # model defined in src/model/__init__.py
            # important uses from args: precision, scale, cpu, m_GPUs, 
                # define method that creates forward model 
                # Don't need detail for now
            _model = model.Model(args, checkpoint)
            
        
            # Loss defined in src/loss/__init__.py
            # define loss function - default set to L1 in args (option.py)

            _loss = loss.Loss(args, checkpoint) if not args.test_only else None
            
            # import the Trainer class directly from trainer.py in src (same directory)
            # checkpoint - passed in and kept as ckp inside
            # builds the train and test routines on t -- but they have not been called yet
            t = Trainer(args, loader, _model, _loss, checkpoint)

            while not t.terminate():
                t.train()
                t.test()

            checkpoint.done()
Example #23
def main():
    global model
    if args.data_test == ['video']:
        from videotester import VideoTester
        model = model.Model(args, checkpoint)
        t = VideoTester(args, model, checkpoint)
        t.test()
    else:
        if checkpoint.ok:
            loader = data.Data(args)
            _model = model.Model(args, checkpoint)
            _loss = loss.Loss(args, checkpoint) if not args.test_only else None
            t = Trainer(args, loader, _model, _loss, checkpoint)
            while not t.terminate():
                t.train()
                t.test()

            checkpoint.done()
Example #24
def main():
    device = torch.device('cpu' if args.cpu else f'cuda:{args.gpu_id}')
    if checkpoint.ok:
        loader = data.Data(args)
        t_model = SplitSR(args, is_teacher=True).to(device)
        args.is_student = True
        s_model = model.Model(args, checkpoint)
        # raise ValueError('not expected model = {}'.format(args.model))
        if args.teach_pretrain is not None:
            t_checkpoint = torch.load(args.teach_pretrain)
            t_model.load_state_dict(t_checkpoint)

        _loss = loss.Loss(args, checkpoint) if not args.test_only else None
        t = Trainer(args, loader, t_model, s_model, _loss, checkpoint)

        while not t.terminate():
            t.train()
            t.test()

        checkpoint.done()
Example #25
def main():
    global model
    torch.cuda.device_count()
    torch.cuda.get_device_name(0)
    if args.data_test == 'video':
        from videotester import VideoTester
        model = model.Model(args, checkpoint)
        t = VideoTester(args, model, checkpoint)
        t.test()
    else:
        if checkpoint.ok:
            loader = data.Data(args)
            model = model.Model(args, checkpoint)
            loss = l.Loss(args, checkpoint) if not args.test_only else None
            t = Trainer(args, loader, model, loss, checkpoint)
            while not t.terminate():
                t.train()
                t.test()

            checkpoint.done()
Example #26
def main():
    global model
    if args.data_test == ['video']:
        from videotester import VideoTester
        model = model.Model(args, checkpoint)
        t = VideoTester(args, model, checkpoint)
        t.test()
    else:
        if checkpoint.ok:
            loader = data.Data(args)
            _model = model.Model(args, checkpoint)
            print('    Total params: %.2fM' %
                  (sum(p.numel() for p in _model.parameters()) / 1000000.0))
            _loss = loss.Loss(args, checkpoint) if not args.test_only else None
            t = Trainer(args, loader, _model, _loss, checkpoint)
            while not t.terminate():
                t.train()
                t.test()

            checkpoint.done()
Example #27
def main():
    # Test dataset 
    args.model = 'fccnn1mw'
    args.data_test = 'Set5'
    args.data_test = args.data_test.split('+')
    args.resume = -1
    args.n_resblocks = 3
    args.test_only = True
    args.save_results = True 
    args.save_gt = True
    args.save = args.model + '_b' + str(args.n_resblocks) + 'f' + str(args.n_feats) + \
            's' + str(args.batch_size) +'sig' + str(args.sigma)

    # Load model 
    args.pretrain = '../experiment/' + args.save + '/model/model_best.pt'
    _model = model.Model(args, checkpoint)
    
    loader = data.Data(args)
    _loss = loss.Loss(args, checkpoint) if not args.test_only else None
    t = Trainer(args, loader, _model, _loss, checkpoint)
    t.test() 
Example #28
def main():
    global model
    if args.data_test == ['video']:
        from videotester import VideoTester
        model = model.Model(args, checkpoint)
        t = VideoTester(args, model, checkpoint)
        t.test()
    else:
        if checkpoint.ok:
            loader = data.Data(args)
            _model = model.Model(args, checkpoint)
            number_parameters = sum(
                map(lambda x: x.numel(), _model.parameters()))
            print(number_parameters)
            _loss = loss.Loss(args, checkpoint) if not args.test_only else None
            t = Trainer(args, loader, _model, _loss, checkpoint)
            while not t.terminate():
                t.train()
                t.test()

            checkpoint.done()
Example #29
def main():
    global model
    string = 'dir_data'
    print(getattr(args, string))
    if args.data_test == ['video']:
        from videotester import VideoTester
        model = model.Model(args, checkpoint)
        t = VideoTester(args, model, checkpoint)
        t.test()
    else:
        if checkpoint.ok:
            loader = data.Data(args)
            model_t = model.Model(args, checkpoint)
            model_s = model.Model(args, checkpoint, student=True)
            model = [model_t, model_s]
            loss_ = loss.Loss(args, checkpoint) if not args.test_only else None
            t = Trainer(args, loader, model, loss_, checkpoint)
            while not t.terminate():
                t.train()
                t.test()

            checkpoint.done()
Example #30
def main(args):
    ckpt_ = checkpoint.Checkpoint(args)
    # data loader
    dataloader_ = data.Data(args)
    # model build up
    model_ = model.Model(args, ckpt_)
    # loss setting
    loss_ = loss.Loss(args, ckpt_)
    # check module for visualization and gradient check
    check_ = check.check(model_)
    # class for training and testing
    trainer_ = trainer.Trainer(args, model_, loss_, dataloader_, ckpt_, check_)
    if args.test:
        trainer_.test()
        return

    # train
    # train with freeze first
    if args.freeze > 0:
        print('freeze base_params for {} epochs'.format(args.freeze))
        for par in ckpt_.base_params:
            par.requires_grad = False
        if hasattr(model_.get_model(), 'base_params'):
            for par in model_.get_model().base_params:
                par.requires_grad = False

    optim_tmp = optimizer.make_optimizer(args, model_)
    for i in range(args.freeze):
        trainer_.train(optim_tmp)

    # start training
    for par in model_.parameters():
        par.requires_grad = True

    for i in range(trainer_.epoch, args.epochs):
        trainer_.train(trainer_.optimizer)
        if args.test_every != 0 and (i + 1) % args.test_every == 0:
            trainer_.test()