Example #1
    def __init__(
        self,
        input_shape,
        lr=1e-4,
        n_epochs=20,
        z_dim=512,
        model_feature_map_sizes=(16, 64, 256, 1024),
        use_geco=False,
        beta=0.01,
        ce_factor=0.5,
        score_mode="combi",
        load_path=None,
        log_dir=None,
        logger="visdom",
        print_every_iter=100,
        data_dir=None,
    ):

        self.score_mode = score_mode
        self.ce_factor = ce_factor
        self.beta = beta
        self.print_every_iter = print_every_iter
        self.n_epochs = n_epochs
        self.batch_size = input_shape[0]
        self.z_dim = z_dim
        self.use_geco = use_geco
        self.input_shape = input_shape
        self.logger = logger
        self.data_dir = data_dir

        log_dict = {}
        if logger is not None:
            log_dict = {
                0: logger,
            }
        self.tx = PytorchExperimentStub(
            name="cevae",
            base_dir=log_dir,
            config=fn_args_as_config,
            loggers=log_dict,
        )

        cuda_available = torch.cuda.is_available()
        self.device = torch.device("cuda" if cuda_available else "cpu")

        self.model = VAE(input_size=input_shape[1:],
                         z_dim=z_dim,
                         fmap_sizes=model_feature_map_sizes).to(self.device)
        self.optimizer = optim.Adam(self.model.parameters(), lr=lr)

        self.vae_loss_ema = 1
        self.theta = 1

        if load_path is not None:
            PytorchExperimentLogger.load_model_static(
                self.model, os.path.join(load_path, "vae_final.pth"))
            time.sleep(5)
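A minimal usage sketch for the constructor above. The enclosing class name is not shown in the snippet, so CeVAETrainer is a hypothetical stand-in (the experiment is registered as "cevae"); input_shape is read as (batch, channels, H, W), since input_shape[0] is taken as the batch size and input_shape[1:] as the image size:

# Hypothetical usage sketch; CeVAETrainer stands in for the class
# whose __init__ is shown above.
trainer = CeVAETrainer(
    input_shape=(16, 1, 128, 128),  # (batch, channels, H, W)
    lr=1e-4,
    n_epochs=20,
    z_dim=512,
    use_geco=False,      # keep a fixed beta instead of GECO control
    score_mode="combi",  # default scoring mode
    log_dir="./logs",
    logger=None,         # pass None to skip visdom logging
)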
Example #2
    def __init__(
        self,
        input_shape,
        lr=1e-4,
        n_epochs=20,
        z_dim=512,
        model_feature_map_sizes=(16, 64, 256, 1024),
        load_path=None,
        log_dir=None,
        logger="visdom",
        print_every_iter=100,
        data_dir=None,
    ):

        self.print_every_iter = print_every_iter
        self.n_epochs = n_epochs
        self.batch_size = input_shape[0]
        self.z_dim = z_dim
        self.input_shape = input_shape
        self.logger = logger
        self.data_dir = data_dir

        log_dict = {}
        if logger is not None:
            log_dict = {
                0: logger,
            }
        self.tx = PytorchExperimentStub(
            name="ae3d",
            base_dir=log_dir,
            config=fn_args_as_config,
            loggers=log_dict,
        )

        cuda_available = torch.cuda.is_available()
        self.device = torch.device("cuda" if cuda_available else "cpu")

        self.model = AE(
            input_size=input_shape[1:],
            z_dim=z_dim,
            fmap_sizes=model_feature_map_sizes,
            conv_op=torch.nn.Conv3d,
            tconv_op=torch.nn.ConvTranspose3d,
        ).to(self.device)
        self.optimizer = optim.Adam(self.model.parameters(), lr=lr)

        if load_path is not None:
            PytorchExperimentLogger.load_model_static(
                self.model, os.path.join(load_path, "ae_final.pth"))
            time.sleep(5)
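This 3D variant differs from Example #1 mainly in the model: AE is built with torch.nn.Conv3d/ConvTranspose3d, so input_shape[1:] must describe a volume. A hypothetical usage sketch (AE3DTrainer is an assumed name, matching the "ae3d" experiment name):

# Hypothetical usage sketch; AE3DTrainer stands in for the enclosing class.
trainer = AE3DTrainer(
    input_shape=(4, 1, 32, 128, 128),  # (batch, channels, D, H, W)
    lr=1e-4,
    z_dim=512,
    logger=None,  # pass None to skip visdom logging
)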
Example #3
    def getAlgo(self, run_mode, model_type=None, recipe=None):
        # Build the basic algo.
        # basic_kws is always configured by hand; the other two configs only
        # need to be set manually the first time and are read back from the
        # load path afterwards.
        basic_kws = None            # read the basic configuration
        if basic_kws is None:
            from .configure import BASIC_KWS
            basic_kws = BASIC_KWS

        if run_mode in ['validate', 'statistics']:
            algo = Algorithm(basic_kws=basic_kws, train_kws={'model_type': model_type})
            algo.__dict__['run'] = self.FF.getFunction('run', run_mode)
            return algo

        if not basic_kws['load']:
            from .configure import TRAIN_KWS, OTHER_KWS
            train_kws = TRAIN_KWS

            assert recipe is not None, 'recipe not specified'
            self.FF.getFunction('modify_train_kws', recipe, OTHER_KWS)(train_kws)

            assert model_type is not None, 'model_type not specified'
            train_kws['recipe'] = recipe
            train_kws['model_type'] = model_type
            need_to_save_config = True

        else:
            train_kws = AlgoFactory.load_config(os.path.join(basic_kws['load_path'], '../config/train_kws.json'))            # read the training config
            model_type = train_kws['model_type']
            need_to_save_config = (run_mode == 'train')

        algo = Algorithm(basic_kws=basic_kws, train_kws=train_kws)

        # Attach the model to algo. Everything here is still a string and may
        # be converted to objects later; in_channels is also added later.
        if not basic_kws['load']:
            from .configure import CONFIGURE_DICT
            model_kws = CONFIGURE_DICT[model_type]
        else:
            model_kws = AlgoFactory.load_config(os.path.join(basic_kws['load_path'], '../config/model_kws.json'))

        if need_to_save_config:
            ex_dir = algo.tx.elog.work_dir
            AlgoFactory.save_config(data=train_kws, filename=os.path.join(basic_kws['log_dir'], ex_dir, 'config/train_kws.json'))
            AlgoFactory.save_config(data=model_kws, filename=os.path.join(basic_kws['log_dir'], ex_dir, 'config/model_kws.json'))

        self.FF.getFunction('modify_model_kws', train_kws)(model_kws)
        model = AlgoFactory.getModel(model_type=model_type, model_kws=model_kws).to(DEVICE)
        optimizer = torch.optim.Adam(model.parameters(), lr=train_kws['lr'])

        if basic_kws['load']:
            model_path = os.path.join(basic_kws['load_path'], 'model.pth')
            if not os.path.exists(model_path):
                raise FileNotFoundError(f'File {model_path} does not exist')
            PytorchExperimentLogger.load_model_static(model, model_path)
            time.sleep(2)

        algo.model = model
        algo.optimizer = optimizer

        # Attach the functions to algo
        dataset_functions, algo_functions = self.getFunctions(train_kws)
        algo_functions['run'] = self.FF.getFunction('run', run_mode)
        algo.dataset_functions = dataset_functions
        algo.__dict__.update(algo_functions)

        return algo
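A sketch of how getAlgo might be called, under the assumption that it lives on an AlgoFactory instance (the method already uses AlgoFactory.load_config and AlgoFactory.getModel as static helpers); the model_type and recipe strings are placeholders:

# Hypothetical usage sketch; AlgoFactory is assumed to be the enclosing class.
factory = AlgoFactory()
algo = factory.getAlgo(
    run_mode='train',
    model_type='unet',   # placeholder key into CONFIGURE_DICT
    recipe='baseline',   # required whenever basic_kws['load'] is False
)
# getAlgo attaches a 'run' callable chosen by run_mode; its exact
# signature comes from FF.getFunction and is not shown in the snippet.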
Example #4
    def __init__(
        self,
        input_shape,
        lr=1e-4,
        critic_iters=1,
        gen_iters=5,
        n_epochs=10,
        gp_lambda=10,
        z_dim=512,
        print_every_iter=20,
        plot_every_epoch=1,
        log_dir=None,
        load_path=None,
        logger="visdom",
        data_dir=None,
        use_encoder=True,
        encoder_feature_weight=1e-4,
        encoder_discr_weight=0.0,
    ):

        self.plot_every_epoch = plot_every_epoch
        self.print_every_iter = print_every_iter
        self.gp_lambda = gp_lambda
        self.n_epochs = n_epochs
        self.gen_iters = gen_iters
        self.critic_iters = critic_iters
        self.size = input_shape[2]
        self.batch_size = input_shape[0]
        self.input_shape = input_shape
        self.z_dim = z_dim
        self.logger = logger
        self.data_dir = data_dir
        self.use_encoder = use_encoder
        self.encoder_feature_weight = encoder_feature_weight
        self.encoder_discr_weight = encoder_discr_weight

        log_dict = {}
        if logger is not None:
            log_dict = {
                0: logger,
            }
        self.tx = PytorchExperimentStub(
            name="fanogan",
            base_dir=log_dir,
            config=fn_args_as_config,
            loggers=log_dict,
        )

        cuda_available = torch.cuda.is_available()
        self.device = torch.device("cuda" if cuda_available else "cpu")

        self.n_image_channels = input_shape[1]

        self.gen = IWGenerator(self.size,
                               z_dim=z_dim,
                               n_image_channels=self.n_image_channels)
        self.dis = IWDiscriminator(self.size,
                                   n_image_channels=self.n_image_channels)

        self.gen.apply(weights_init)
        self.dis.apply(weights_init)

        self.optimizer_G = torch.optim.Adam(self.gen.parameters(),
                                            lr=lr,
                                            betas=(0.5, 0.999))
        self.optimizer_D = torch.optim.Adam(self.dis.parameters(),
                                            lr=lr,
                                            betas=(0.5, 0.999))

        self.gen = self.gen.to(self.device)
        self.dis = self.dis.to(self.device)

        if self.use_encoder:
            self.enc = IWEncoder(self.size,
                                 z_dim=z_dim,
                                 n_image_channels=self.n_image_channels)
            self.enc.apply(weights_init)
            self.enc = self.enc.to(self.device)
            self.optimizer_E = torch.optim.Adam(self.enc.parameters(),
                                                lr=lr,
                                                betas=(0.5, 0.999))

        self.z = torch.randn(self.batch_size, z_dim).to(self.device)

        if load_path is not None:
            PytorchExperimentLogger.load_model_static(
                self.dis, os.path.join(load_path, "dis_final.pth"))
            PytorchExperimentLogger.load_model_static(
                self.gen, os.path.join(load_path, "gen_final.pth"))
            if self.use_encoder:
                try:
                    PytorchExperimentLogger.load_model_static(
                        self.enc, os.path.join(load_path, "enc_final.pth"))
                except Exception:
                    warnings.warn("Could not find an Encoder in the directory")
            time.sleep(5)
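A usage sketch for this constructor; FAnoGANTrainer is an assumed class name (the experiment is registered as "fanogan"), and input_shape is read as (batch, channels, H, W), since self.size = input_shape[2] and self.n_image_channels = input_shape[1]:

# Hypothetical usage sketch; FAnoGANTrainer stands in for the enclosing class.
trainer = FAnoGANTrainer(
    input_shape=(16, 1, 64, 64),  # (batch, channels, H, W)
    lr=1e-4,
    critic_iters=1,
    gen_iters=5,
    z_dim=512,
    use_encoder=True,  # also builds IWEncoder and its Adam optimizer
    logger=None,       # pass None to skip visdom logging
)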