Beispiel #1
0
    def test_check_output(self):
        """weight_norm should split the Conv2d weight into g/v components
        that match the NumPy reference normalization along self.dim."""
        fluid.enable_imperative()
        linear = paddle.nn.Conv2d(2, 3, 3)
        before_weight = linear.weight.numpy()
        # Treat a missing dim as -1 ("normalize over the whole tensor").
        # PEP 8: compare to None with `is`, not `==`.
        if self.dim is None:
            self.dim = -1

        if self.dim != -1:
            # Map a possibly negative axis into the valid range.
            # NOTE(review): len(before_weight) is the size of axis 0, not the
            # number of axes — confirm this matches weight_norm's semantics.
            self.dim = (self.dim + len(before_weight)) % len(before_weight)
        # Only the side effect on `linear` matters; the return value is unused.
        weight_norm(linear, dim=self.dim)
        outputs = []
        for data in self.data.values():
            output = linear(fluid.dygraph.to_variable(data))
            outputs.append(output.numpy())
        self.actual_outputs = [
            linear.weight_g.numpy(),
            linear.weight_v.numpy()
        ]

        expect_output = self.weight_normalize(before_weight, self.dim)

        for expect, actual in zip(expect_output, self.actual_outputs):
            self.assertTrue(
                numpy.allclose(numpy.array(actual), expect, atol=0.001))
Beispiel #2
0
def main(cfgs):
    """Search over generator configs: for each config, profile FLOPs,
    generate eval images, optionally compute FID, and pickle the results
    into ``<save_dir>/search_result.pkl``."""
    fluid.enable_imperative()
    if 'resnet' in cfgs.netG:
        from configs.resnet_configs import get_configs
    else:
        raise NotImplementedError
    configs = get_configs(config_name=cfgs.config_set)
    configs = list(configs.all_configs())

    data_loader, id2name = create_eval_data(cfgs, direction=cfgs.direction)
    model = TestModel(cfgs)
    model.setup()  ### load_network

    ### this input used in compute model flops and params
    for data in data_loader:
        model.set_input(data)
        break

    npz = np.load(cfgs.real_stat_path)
    results = []
    for config in configs:
        fakes, names = [], []
        flops, _ = model.profile(config=config)
        s_time = time.time()
        for i, data in enumerate(data_loader()):
            model.set_input(data)
            model.test(config)
            generated = model.fake_B
            fakes.append(generated.detach().numpy())
            name = id2name[i]
            save_path = os.path.join(cfgs.save_dir, 'test' + str(config))
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            save_path = os.path.join(save_path, name)
            names.append(name)
            # Only the first num_test images are actually written to disk.
            if i < cfgs.num_test:
                image = util.tensor2img(generated)
                util.save_image(image, save_path)

        result = {'config_str': encode_config(config), 'flops': flops}  ### compute FLOPs

        # FID is computed in static-graph mode, then dygraph is restored.
        fluid.disable_imperative()
        if not cfgs.no_fid:
            block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[2048]
            inception_model = InceptionV3([block_idx])
            fid = get_fid(fakes, inception_model, npz, cfgs.inception_model_path, batch_size=cfgs.batch_size, use_gpu=cfgs.use_gpu)
            result['fid'] = fid
        fluid.enable_imperative()

        e_time = (time.time() - s_time) / 60  # minutes per config
        result['time'] = e_time
        print(result)
        results.append(result)

    # BUGFIX: create save_dir itself — the original created its *parent*
    # (os.path.dirname), so the open() below could still raise.
    if not os.path.exists(cfgs.save_dir):
        os.makedirs(cfgs.save_dir)
    save_file = os.path.join(cfgs.save_dir, 'search_result.pkl')
    with open(save_file, 'wb') as f:
        pickle.dump(results, f)
    print('Successfully finish searching!!!')
Beispiel #3
0
    def __init__(
        self,
        use_cuda=False,
        tree=True,
        prob=False,
        use_pos=False,
        model_files_path=None,
        buckets=False,
        batch_size=None,
        encoding_model="ernie-lstm",
    ):
        """Build the parser: resolve (and download if missing) the model
        files, assemble CLI-style args, set up the dygraph environment,
        and load the trained model in eval mode.

        Args:
            use_cuda: run on GPU when True.
            tree: enforce tree-structured output (passed as --tree).
            prob: also output arc probabilities (adds a 'prob' field).
            use_pos: also output POS tags (adds a 'postag' field).
            model_files_path: directory of model files; when None it is
                derived from `encoding_model` and downloaded if absent.
            buckets: when falsy, bucketing is disabled (args.buckets = None).
            batch_size: per-batch size; defaults to 50 when None and no
                bucketing is used.
            encoding_model: one of the supported encoder names below.

        Raises:
            KeyError: if `encoding_model` is not a known encoder.
            Exception: re-raised if the model download fails.
        """
        if model_files_path is None:
            if encoding_model in ["lstm", "transformer", "ernie-1.0", "ernie-tiny", "ernie-lstm"]:
                model_files_path = self._get_abs_path(os.path.join("./model_files/", encoding_model))
            else:
                raise KeyError("Unknown encoding model.")

            # Fetch the pretrained files on first use.
            if not os.path.exists(model_files_path):
                try:
                    utils.download_model_from_url(model_files_path, encoding_model)
                except Exception as e:
                    logging.error("Failed to download model, please try again")
                    logging.error("error: {}".format(e))
                    raise e

        # Assemble CLI-style arguments for ArgConfig.
        args = [
            "--model_files={}".format(model_files_path), "--config_path={}".format(self._get_abs_path('config.ini')),
            "--encoding_model={}".format(encoding_model)
        ]

        if use_cuda:
            args.append("--use_cuda")
        if tree:
            args.append("--tree")
        if prob:
            args.append("--prob")
        if batch_size:
            args.append("--batch_size={}".format(batch_size))

        args = ArgConfig(args)
        # Don't instantiate the log handle
        args.log_path = None
        self.env = Environment(args)
        self.args = self.env.args
        # Dygraph (imperative) mode must be on before loading the model.
        fluid.enable_imperative(self.env.place)
        self.model = load(self.args.model_path)
        self.model.eval()
        self.lac = None
        self.use_pos = use_pos
        # buckets=None if not buckets else defaults
        if not buckets:
            self.args.buckets = None
        if args.prob:
            self.env.fields = self.env.fields._replace(PHEAD=Field("prob"))
        if self.use_pos:
            self.env.fields = self.env.fields._replace(CPOS=Field("postag"))
        # set default batch size if batch_size is None and not buckets
        if batch_size is None and not buckets:
            self.args.batch_size = 50
Beispiel #4
0
 def test_check_output(self):
     """remove_weight_norm must restore the original Conv2d weight after a
     weight_norm round-trip."""
     fluid.enable_imperative()
     linear = paddle.nn.Conv2d(2, 3, 3)
     before_weight = linear.weight
     # Apply and immediately remove the reparameterization; only the side
     # effects on `linear` matter, so the return values are not bound.
     weight_norm(linear, dim=self.dim)
     remove_weight_norm(linear)
     after_weight = linear.weight
     self.assertTrue(
         numpy.allclose(before_weight.numpy(),
                        after_weight.numpy(),
                        atol=0.001))
Beispiel #5
0
    def __init__(self,
                 use_cuda=False,
                 tree=True,
                 prob=False,
                 use_pos=False,
                 model_files_path=None,
                 buckets=False,
                 batch_size=None):
        """Build the parser: resolve (and download if missing) the model
        files, set up the dygraph environment, and load the trained model.

        Args:
            use_cuda: run on GPU when True.
            tree: enforce tree-structured output (passed as --tree).
            prob: also output arc probabilities (adds a 'prob' field).
            use_pos: also output POS tags (adds a 'postag' field).
            model_files_path: model file directory; downloaded when None
                and the default path does not exist.
            buckets: when falsy, bucketing is disabled (args.buckets = None).
            batch_size: per-batch size; defaults to 50 when None and no
                bucketing is used.

        Raises:
            Exception: re-raised if the model download fails.
        """
        if model_files_path is None:
            model_files_path = self._get_abs_path('./model_files/baidu')
            if not os.path.exists(model_files_path):
                try:
                    utils.download_model_from_url(model_files_path)
                except Exception as e:
                    logging.error("Failed to download model, please try again")
                    logging.error(f"error: {e}")
                    # BUGFIX: the original returned here, leaving a
                    # half-initialized object (no self.env/self.model) that
                    # fails later with a confusing AttributeError.  Re-raise
                    # instead, consistent with the sibling constructor.
                    raise e

        args = [
            f"--model_files={model_files_path}",
            f"--config_path={self._get_abs_path('config.ini')}"
        ]

        if use_cuda:
            args.append("--use_cuda")
        if tree:
            args.append("--tree")
        if prob:
            args.append("--prob")
        if batch_size:
            args.append(f"--batch_size={batch_size}")

        args = ArgConfig(args)
        # Don't instantiate the log handle
        args.log_path = None
        self.env = Environment(args)
        self.args = self.env.args
        # Dygraph (imperative) mode must be on before loading the model.
        fluid.enable_imperative(self.env.place)
        self.model = load(self.args.model_path)
        self.lac = None
        self.use_pos = use_pos
        # buckets=None if not buckets else defaults
        if not buckets:
            self.args.buckets = None
        if args.prob:
            self.env.fields = self.env.fields._replace(PHEAD=Field('prob'))
        if self.use_pos:
            self.env.fields = self.env.fields._replace(CPOS=Field('postag'))
        # set default batch size if batch_size is None and not buckets
        if batch_size is None and not buckets:
            self.args.batch_size = 50
    def evaluate_model(self, step):
        """Evaluate both generator directions at training `step`: save sample
        images, compute FID per direction, and track best/rolling-mean scores.

        Returns:
            dict mapping 'metric/...' keys to FID values for logging.
        """
        ret = {}
        self.is_best = False
        save_dir = os.path.join(self.cfgs.save_dir, 'mobile', 'eval',
                                str(step))
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        self.netG_A.eval()
        self.netG_B.eval()
        for direction in ['AtoB', 'BtoA']:
            eval_dataloader = getattr(self, 'eval_dataloader_' + direction)
            fakes = []
            cnt = 0
            for i, data_i in enumerate(eval_dataloader):
                self.set_single_input(data_i)
                self.test_single_side(direction)
                fakes.append(self.fake_B.detach().numpy())
                # Save at most the first 10 generated images for inspection.
                for j in range(len(self.fake_B)):
                    if cnt < 10:
                        name = 'fake_' + direction + str(i + j) + '.png'
                        save_path = os.path.join(save_dir, name)
                        fake_im = util.tensor2img(self.fake_B[j])
                        util.save_image(fake_im, save_path)
                    cnt += 1

            suffix = direction[-1]
            # FID is computed in static-graph mode, then dygraph is restored.
            fluid.disable_imperative()
            fid = get_fid(fakes, self.inception_model,
                          getattr(self, 'npz_%s' % direction[-1]),
                          self.cfgs.inception_model)
            fluid.enable_imperative(place=self.cfgs.place)
            if fid < getattr(self, 'best_fid_%s' % suffix):
                self.is_best = True
                setattr(self, 'best_fid_%s' % suffix, fid)
            print("direction: %s, fid score is: %f, best fid score is %f" %
                  (direction, fid, getattr(self, 'best_fid_%s' % suffix)))
            # Keep a rolling window of the last 3 FID scores for the mean.
            fids = getattr(self, 'fids_%s' % suffix)
            fids.append(fid)
            if len(fids) > 3:
                fids.pop(0)
            ret['metric/fid_%s' % suffix] = fid
            ret['metric/fid_%s-mean' % suffix] = sum(fids) / len(fids)
            ret['metric/fid_%s-best' % suffix] = getattr(
                self, 'best_fid_%s' % suffix)

        self.netG_A.train()
        self.netG_B.train()
        return ret
Beispiel #7
0
def main(cfgs):
    """Run the test model over the eval set, save generated images, and
    optionally report FLOPs/params (profiling) or the FID score."""
    fluid.enable_imperative()

    # A config string is only meaningful for super/sub network generators.
    if cfgs.config_str is None:
        assert 'super' not in cfgs.model
        config = None
    else:
        assert 'super' in cfgs.netG or 'sub' in cfgs.netG
        config = decode_config(cfgs.config_str)

    loader, index_to_name = create_eval_data(cfgs, direction=cfgs.direction)
    model = TestModel(cfgs)
    model.setup()  ### load_network

    fake_arrays, saved_names = [], []
    for idx, batch in enumerate(loader()):
        model.set_input(batch)
        if idx == 0 and cfgs.need_profile:
            # Profile once on the first batch, then stop the whole run.
            flops, params = model.profile(config)
            print('FLOPs: %.3fG, params: %.3fM' % (flops / 1e9, params / 1e6))
            sys.exit(0)
        model.test(config)
        fake = model.fake_B
        fake_arrays.append(fake.detach().numpy())
        name = index_to_name[idx]
        print(name)
        out_dir = os.path.join(cfgs.save_dir, 'test')
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)
        out_path = os.path.join(out_dir, name)
        saved_names.append(name)
        # Only the first num_test images are written to disk.
        if idx < cfgs.num_test:
            util.save_image(util.tensor2img(fake), out_path)

    fluid.disable_imperative()

    if not cfgs.no_fid:
        print('Calculating FID...')
        block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[2048]
        inception_model = InceptionV3([block_idx])
        npz = np.load(cfgs.real_stat_path)
        fid = get_fid(fake_arrays, inception_model, npz, cfgs.inception_model_path)
        print('fid score: %#.2f' % fid)
    def evaluate_model(self, step):
        """Evaluate the student generator at training `step`: save sample
        student/teacher image pairs, compute FID, and track the best and
        rolling-mean scores.

        Returns:
            dict mapping 'metric/...' keys to FID values for logging.
        """
        ret = {}
        self.is_best = False
        save_dir = os.path.join(self.cfgs.save_dir, 'distiller', 'eval',
                                str(step))
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        self.netG_student.eval()
        fakes = []
        cnt = 0
        for i, data_i in enumerate(self.eval_dataloader):
            self.set_single_input(data_i)
            self.test()
            fakes.append(self.Sfake_B.detach().numpy())
            # Save at most the first 10 student/teacher image pairs.
            for j in range(len(self.Sfake_B)):
                if cnt < 10:
                    Sname = 'Sfake_' + str(i + j) + '.png'
                    Tname = 'Tfake_' + str(i + j) + '.png'
                    Sfake_im = util.tensor2img(self.Sfake_B[j])
                    Tfake_im = util.tensor2img(self.Tfake_B[j])
                    util.save_image(Sfake_im, os.path.join(save_dir, Sname))
                    util.save_image(Tfake_im, os.path.join(save_dir, Tname))
                cnt += 1

        # FID is computed in static-graph mode, then dygraph is restored.
        fluid.disable_imperative()
        fid = get_fid(fakes, self.inception_model, self.npz,
                      self.cfgs.inception_model)
        fluid.enable_imperative(place=self.cfgs.place)
        if fid < self.best_fid:
            self.is_best = True
            self.best_fid = fid
        print("fid score is: %f, best fid score is %f" % (fid, self.best_fid))
        # Keep a rolling window of the last 3 FID scores for the mean metric.
        self.fids.append(fid)
        if len(self.fids) > 3:
            self.fids.pop(0)
        ret['metric/fid'] = fid
        ret['metric/fid-mean'] = sum(self.fids) / len(self.fids)
        ret['metric/fid-best'] = self.best_fid

        self.netG_student.train()
        return ret
Beispiel #9
0
def main(cfgs):
    """Export a sub-generator: load a trained supernet, transfer its weights
    into the sub-network described by the decoded config string, and save
    the sub-network's state dict.

    BUGFIX: the original body referenced an undefined global ``args`` while
    the parameter ``cfgs`` was never used; all accesses now go through the
    ``cfgs`` parameter."""
    fluid.enable_imperative()
    config = decode_config(cfgs.config_str)
    if cfgs.model == 'mobile_resnet':
        from model.mobile_generator import MobileResnetGenerator as SuperModel
        from model.sub_mobile_generator import SubMobileResnetGenerator as SubModel
        input_nc, output_nc = cfgs.input_nc, cfgs.output_nc
        super_model = SuperModel(input_nc, output_nc, ngf=cfgs.ngf, norm_layer=InstanceNorm, n_blocks=9)
        sub_model = SubModel(input_nc, output_nc, config=config, norm_layer=InstanceNorm, n_blocks=9)
    else:
        raise NotImplementedError

    load_network(super_model, cfgs.input_path)
    transfer_weight(super_model, sub_model)

    if not os.path.exists(cfgs.output_path):
        os.makedirs(cfgs.output_path)
    save_path = os.path.join(cfgs.output_path, 'final_net')
    fluid.save_dygraph(sub_model.state_dict(), save_path)
    print('Successfully export the subnet at [%s].' % save_path)
Beispiel #10
0
    def _get_abs_path(self, path):
        return os.path.normpath(
            os.path.join(os.path.dirname(os.path.abspath(__file__)), path))


if __name__ == '__main__':
    logging.info("init arguments.")
    args = ArgConfig()
    logging.info("init environment.")
    env = Environment(args)
    logging.info(f"Override the default configs\n{env.args}")
    logging.info(f"{env.WORD}\n{env.FEAT}\n{env.ARC}\n{env.REL}")
    logging.info(f"Set the max num of threads to {env.args.threads}")
    logging.info(
        f"Set the seed for generating random numbers to {env.args.seed}")
    logging.info(f"Run the subcommand in mode {env.args.mode}")

    fluid.enable_imperative(env.place)
    mode = env.args.mode
    # Dispatch the subcommand by mode; unknown modes are logged as errors.
    dispatch = {
        "train": train,
        "evaluate": evaluate,
        "predict": predict,
        "predict_q": predict_query,
    }
    runner = dispatch.get(mode)
    if runner is not None:
        runner(env)
    else:
        logging.error(f"Unknown task mode: {mode}.")
Beispiel #11
0
# Produce one whole batch per step; wire this up with set_batch_generator.
def batch_generator_creator():
    """Return a reader that yields BATCH_NUM random (image, label) batches."""
    def _batch_reader():
        for _ in range(BATCH_NUM):
            images, labels = get_random_images_and_labels(
                [BATCH_SIZE, MNIST_IMAGE_SIZE], [BATCH_SIZE, MNIST_LABLE_SIZE])
            yield images, labels

    return _batch_reader


BATCH_SIZE = 10

place = fluid.CPUPlace()  # or fluid.CUDAPlace(0)
fluid.enable_imperative(place)
#
# # Use a per-sample generator as the DataLoader's data source
# data_loader1 = fluid.io.DataLoader.from_generator(capacity=10)
# data_loader1.set_sample_generator(sample_generator_creator(), batch_size=BATCH_SIZE, places=place)
#
# # Use a sample-list generator as the DataLoader's data source
# data_loader2 = fluid.io.DataLoader.from_generator(capacity=10)
# data_loader2.set_sample_list_generator(sample_list_generator_creator(), places=place)

# Use a batch generator as the DataLoader's data source
data_loader3 = fluid.io.DataLoader.from_generator(capacity=10)
data_loader3.set_batch_generator(batch_generator_creator(), places=place)

for data in data_loader3():
    image, label = data