def build_model(cfg, writer):
    print('Building model on ', end='', flush=True)
    t1 = time.time()
    # Place the model on the first GPU when CUDA is requested, otherwise keep it on the CPU.
    device = torch.device('cuda:0' if cfg.TRAIN.is_cuda else 'cpu')
    model = Dual_net(input_channels=cfg.TRAIN.input_nc).to(device)
    # n_data = list(cfg.DATA.patch_size)
    # rand_data = torch.rand(1,1,n_data[0],n_data[1])
    # writer.add_graph(model, (rand_data,))

    # Wrap the model with DataParallel when more than one GPU is available;
    # the batch size must be divisible by the GPU count so each replica gets
    # an equal share.
    cuda_count = torch.cuda.device_count()
    if cuda_count > 1:
        if cfg.TRAIN.batch_size % cuda_count == 0:
            print('%d GPUs ... ' % cuda_count, end='', flush=True)
            model = nn.DataParallel(model)
        else:
            raise AttributeError(
                'Batch size (%d) is not divisible by the number of GPUs (%d)'
                % (cfg.TRAIN.batch_size, cuda_count))
    else:
        print('a single GPU ... ', end='', flush=True)
    print('Done (time: %.2fs)' % (time.time() - t1))
    return model
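A minimal usage sketch for build_model follows; the SimpleNamespace config, the log directory, and the concrete values are assumptions made for illustration, while Dual_net and build_model are expected to come from the project itself.

# Hypothetical usage sketch (not from the original project): build a config
# object exposing the fields build_model reads, then call it once.
import torch
from types import SimpleNamespace
from torch.utils.tensorboard import SummaryWriter

cfg = SimpleNamespace(TRAIN=SimpleNamespace(
    is_cuda=torch.cuda.is_available(),  # flag name taken from the snippet
    input_nc=1,                         # number of input channels (assumed value)
    batch_size=4))                      # must be divisible by the GPU count

writer = SummaryWriter(log_dir='./logs')  # hypothetical log directory
model = build_model(cfg, writer)          # prints the device info and timing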
Example #2
                base_path = '../../data/simple/test_raw'
            else:
                raise AttributeError('No such test mode!')
        else:
            test_path = '../test_result/complex'
            base_path = '../../data/complex/' + cfg.TEST.crop_way + '/test_raw'
        model_name = cfg.TEST.model_name
        save_path = os.path.join(test_path, model_name, 'result')
        model_path = cfg.TRAIN.save_path
        model_path = os.path.join(model_path, model_name)

        if not os.path.exists(save_path):
            os.makedirs(save_path)
        
        thresd = cfg.TEST.thresd
        model = Dual_net(input_channels=cfg.TRAIN.input_nc)
        ckpt = 'model.ckpt'
        ckpt_path = os.path.join(model_path, ckpt)
        checkpoint = torch.load(ckpt_path)

        # Copy the saved weights under their original parameter names; the
        # 'module.' prefix stripping is left disabled in this example.
        new_state_dict = OrderedDict()
        state_dict = checkpoint['model_weights']
        for k, v in state_dict.items():
            # name = k[7:]  # remove the 'module.' prefix added by DataParallel
            name = k
            new_state_dict[name] = v

        model.load_state_dict(new_state_dict)
        device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
        model = model.to(device)
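The OrderedDict copy above is the usual place to strip the 'module.' prefix that nn.DataParallel prepends to parameter names; in this example the stripping is commented out. Below is a small sketch of that remapping, assuming the checkpoint stores the weights under the 'model_weights' key as it does here; the helper name load_weights is hypothetical.

# Hypothetical helper (not part of the project): load a checkpoint that may
# have been saved from an nn.DataParallel-wrapped model into a plain model.
from collections import OrderedDict
import torch

def load_weights(model, ckpt_path, device='cpu'):
    checkpoint = torch.load(ckpt_path, map_location=device)
    state_dict = checkpoint['model_weights']  # key name taken from the example above
    remapped = OrderedDict()
    for k, v in state_dict.items():
        # Drop the 'module.' prefix added by DataParallel, if it is present.
        remapped[k[7:] if k.startswith('module.') else k] = v
    model.load_state_dict(remapped)
    return model.to(device)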
Example #3

        else:
            test_path = '../test_result/complex'
            base_path = '../../data/complex/' + cfg.TEST.crop_way + '/test_raw'
        model_name = cfg.TEST.model_name
        save_path = os.path.join(test_path, model_name + '_aug_batch',
                                 'result')
        model_path = cfg.TRAIN.save_path
        model_path = os.path.join(model_path, model_name)

        if not os.path.exists(save_path):
            os.makedirs(save_path)

        thresd = cfg.TEST.thresd

        device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
        model = Dual_net(input_channels=cfg.TRAIN.input_nc).to(device)
        # Wrap the model with DataParallel when more than one GPU is available;
        # the batch size must be divisible by the GPU count.
        cuda_count = torch.cuda.device_count()
        if cuda_count > 1:
            if cfg.TRAIN.batch_size % cuda_count == 0:
                print('%d GPUs ... ' % cuda_count, end='', flush=True)
                model = nn.DataParallel(model)
            else:
                raise AttributeError(
                    'Batch size (%d) is not divisible by the number of GPUs (%d)'
                    % (cfg.TRAIN.batch_size, cuda_count))
        else:
            print('a single GPU ... ', end='', flush=True)

        ckpt = 'model.ckpt'
        ckpt_path = os.path.join(model_path, ckpt)
        if os.path.isfile(ckpt_path):