Example #1
def get_model(mtype, **kwargs):
    """
    Create the model depending on the current params
    """
    if mtype == 'normal':
        model = EnsembleModel(**kwargs)
    elif mtype == 'multi_r':
        model_list = [ChowderModel(r) for r in kwargs['R_list']]
        model = EnsembleModel.from_model_list(model_list)
    elif mtype == "deepset":
        model = EnsembleModel(model_type=DeepSetChowder, **kwargs)
    else:
        raise NotImplementedError
    return model
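A quick usage sketch (illustrative, not from the original project): only get_model, the mtype values, and the R_list key come from the factory above; every other keyword argument is a placeholder forwarded verbatim.

# Hypothetical calls; kwargs other than R_list are placeholders.
model = get_model('multi_r', R_list=[1, 3, 5])  # builds one ChowderModel per R value
model = get_model('deepset', R=5)               # same kwargs, but with DeepSetChowder submodels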
Example #2
def ensemble_predict(test_dataset, model, id2label, vote=True):

    # ckpt_path-ensemble.txt lists the model checkpoint paths, one per line
    with open('./ckpt_path-ensemble.txt', 'r', encoding='utf-8') as f:
        ensemble_dir_list = f.readlines()
        print('ENSEMBLE_DIR_LIST:{}'.format(ensemble_dir_list))
    model_path_list = [x.strip() for x in ensemble_dir_list]
    print('model_path_list:{}'.format(model_path_list))

    # device = torch.device(f'cuda:{GPU_IDS[0]}')
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = EnsembleModel(model=model, model_path_list=model_path_list, device=device, lamb=lamb)
    labels = base_predict(test_dataset, model, id2label, ensemble=True, vote=vote)
    return labels
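A sketch of how this might be called, assuming the surrounding module defines lamb and base_predict as used above; the base model variable and the checkpoint paths are placeholders.

# ./ckpt_path-ensemble.txt holds one checkpoint path per line, e.g. (placeholders):
#   ./out/checkpoint-1500
#   ./out/checkpoint-3000
labels = ensemble_predict(test_dataset, base_model, id2label, vote=True)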
Example #3
def create_run_ensemble(model_state_list,
                        n_layers,
                        grad_clip_value=5,
                        seed=0,
                        num_epochs=20,
                        learning_rate=0.001,
                        data_dir='./data',
                        init_channels=get('init_channels'),
                        batch_size=get('batch_size'),
                        genotype_class='PCDARTS',
                        data_augmentations=None,
                        save_model_str=None):
    
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    gpu = 'cuda:0'
    np.random.seed(seed)
    torch.cuda.set_device(gpu)
    cudnn.benchmark = True
    torch.manual_seed(seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(seed)
    logging.info('gpu device = %s' % gpu)
    logging.info("config = %s", config)

    if data_augmentations is None:
        # You can add any preprocessing/data augmentation you want here
        data_augmentations = transforms.ToTensor()
    elif isinstance(data_augmentations, list):
        data_augmentations = transforms.Compose(data_augmentations)
    elif not isinstance(data_augmentations, transforms.Compose):
        raise NotImplementedError

    train_dataset = K49(data_dir, True, data_augmentations)
    test_dataset = K49(data_dir, False, data_augmentations)
    # train_dataset = KMNIST(data_dir, True, data_augmentations)
    # test_dataset = KMNIST(data_dir, False, data_augmentations)
    # Make data batch iterable
    # Could modify the sampler to not uniformly random sample
    
    train_loader = DataLoader(dataset=train_dataset,
                              batch_size=batch_size,
                              shuffle=True)
    test_loader = DataLoader(dataset=test_dataset,
                             batch_size=batch_size,
                             shuffle=False)

    genotype = getattr(genotypes, genotype_class)
    dataset = {'trn_features': [], 'test_features': []}
    dims = []
    for i, model_state in enumerate(model_state_list):
        model = Network(init_channels, train_dataset.n_classes, n_layers, genotype)
        model.load_state_dict(torch.load(model_state))
        model.cuda()
        for p in model.parameters():
            p.requires_grad = False
        trn_labels = []
        trn_features = []
        if i == 0:
            for d,la in train_loader:
                o = model(Variable(d.cuda()))
                o = o.view(o.size(0),-1)
                trn_labels.extend(la)
                trn_features.extend(o.cpu().data)
            test_labels = []
            test_features = []
            for d,la in test_loader:
                o = model(Variable(d.cuda()))
                o = o.view(o.size(0),-1)
                test_labels.extend(la)
                test_features.extend(o.cpu().data)
            dataset['trn_labels'] = trn_labels
            dataset['test_labels'] = test_labels

        else:
            for d,la in train_loader:
                o = model(Variable(d.cuda()))
                o = o.view(o.size(0),-1)
                trn_features.extend(o.cpu().data)
            test_labels = []
            test_features = []
            for d,la in test_loader:
                o = model(Variable(d.cuda()))
                o = o.view(o.size(0),-1)
                test_features.extend(o.cpu().data)            
        dataset['trn_features'].append(trn_features)
        dims.append(dataset['trn_features'][i][0].size(0))
        dataset['test_features'].append(test_features)
    

    trn_feat_dset = FeaturesDataset(dataset['trn_features'][0],
                                    dataset['trn_features'][1],
                                    dataset['trn_features'][2],
                                    dataset['trn_labels'])
    test_feat_dset = FeaturesDataset(dataset['test_features'][0],
                                     dataset['test_features'][1],
                                     dataset['test_features'][2],
                                     dataset['test_labels'])
    trn_feat_loader = DataLoader(trn_feat_dset, batch_size=64, shuffle=True)
    test_feat_loader = DataLoader(test_feat_dset, batch_size=64)
    model = EnsembleModel(dims, out_size=train_dataset.n_classes)
    criterion = torch.nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=learning_rate,
                                momentum=0.9)
    
    for epoch in range(num_epochs):
        epoch_loss, epoch_accuracy = fit(epoch, model, trn_feat_loader, criterion, training=True)
        val_epoch_loss, val_epoch_accuracy = fit(epoch, model, test_feat_loader, criterion, training=False)


    if save_model_str:
        # Save the model checkpoint, can be restored via "model = torch.load(save_model_str)"
        if not os.path.exists(save_model_str):
            os.mkdir(save_model_str)
        
        torch.save(model.state_dict(), os.path.join(save_model_str, time.ctime())) 
Example #4
            elif signal == -1:
                side = 'sell'
            elif signal == 0:
                print('Signal is 0, staying flat')
                return
            else:
                raise ValueError(
                    f'Unexpected signal: {signal}, should have been 0, 1 or -1'
                )
            self.bit.enter_bracket(self.symbol,
                                   self.size,
                                   side=side,
                                   target_offset=self.target_offset,
                                   stop_offset=self.stop_offset)
        else:
            print('Already in a trade and not entering')


if __name__ == '__main__':  # where we call our Trader class defined above, in main
    model = EnsembleModel()
    trader = Trader('XBTUSD',
                    model,
                    500,
                    '1h',
                    target_offset=50,
                    stop_offset=50)
    try:
        trader.run()
    except:
        trader.live = False
Example #5
    def compute(self, config, budget, *args, **kwargs):
        """
        Get model with hyperparameters from config generated by get_configspace()
        """
        if not torch.cuda.is_available():
            logging.info('no gpu device available')
            sys.exit(1)

        logging.info(f'Running config for {budget} epochs')
        gpu = 'cuda:0'
        np.random.seed(self.seed)
        torch.cuda.set_device(gpu)
        cudnn.benchmark = True
        torch.manual_seed(self.seed)
        cudnn.enabled = True
        torch.cuda.manual_seed(self.seed)
        logging.info('gpu device = %s' % gpu)
        logging.info("config = %s", config)

        ensemble_model = EnsembleModel(self.trained_models,
                                       dense_units=config['dense_units'],
                                       out_size=self.train_dataset.n_classes)
        ensemble_model = ensemble_model.cuda()

        logging.info("param size = %fMB",
                     utils.count_parameters_in_MB(ensemble_model))

        criterion = nn.CrossEntropyLoss()
        criterion = criterion.cuda()

        if config['optimizer'] == 'sgd':
            optimizer = torch.optim.SGD(ensemble_model.parameters(),
                                        lr=config['initial_lr'],
                                        momentum=config['sgd_momentum'],
                                        weight_decay=config['weight_decay'],
                                        nesterov=config['nesterov'])
        else:
            optimizer = get('opti_dict')[config['optimizer']](
                ensemble_model.parameters(),
                lr=config['initial_lr'],
                weight_decay=config['weight_decay'])

        if config['lr_scheduler'] == 'Cosine':
            lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
                optimizer, int(budget))
        elif config['lr_scheduler'] == 'Exponential':
            lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer,
                                                                  gamma=0.1)

        indices = list(
            np.random.randint(0,
                              2 * len(self.train_dataset) // 3,
                              size=len(self.train_dataset) // 3)
        )  # list(range(int(self.split * len(self.train_dataset))))
        valid_indices = list(
            np.random.randint(2 * len(self.train_dataset) // 3,
                              len(self.train_dataset),
                              size=len(self.train_dataset) // 6)
        )  # list(range(int(self.split * len(self.train_dataset)), len(self.train_dataset)))
        print("Training size=", len(indices))
        training_sampler = SubsetRandomSampler(indices)
        valid_sampler = SubsetRandomSampler(valid_indices)
        train_queue = torch.utils.data.DataLoader(dataset=self.train_dataset,
                                                  batch_size=self.batch_size,
                                                  sampler=training_sampler)

        valid_queue = torch.utils.data.DataLoader(dataset=self.train_dataset,
                                                  batch_size=self.batch_size,
                                                  sampler=valid_sampler)

        for epoch in range(int(budget)):
            logging.info('epoch %d lr %e', epoch, lr_scheduler.get_lr()[0])
            ensemble_model.drop_path_prob = config['drop_path_prob'] * epoch / int(budget)

            train_acc, train_obj = ensemble_train(
                train_queue,
                ensemble_model,
                criterion,
                optimizer,
                grad_clip=config['grad_clip_value'])
            logging.info('train_acc %f', train_acc)
            lr_scheduler.step()

            valid_acc, valid_obj = infer(valid_queue, ensemble_model,
                                         criterion)
            logging.info('valid_acc %f', valid_acc)

        return {
            # Hyperband always minimizes, so we want to minimise the error, error = 1 - accuracy
            'loss': valid_obj,
            # mandatory - can be used in the future to give more information
            'info': {}
        }
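For reference, a sketch of the hyperparameter dictionary compute() reads; the keys are exactly the ones accessed above, while the values and the worker instance are placeholders.

config = {
    'dense_units': 128,
    'optimizer': 'sgd',
    'initial_lr': 0.025,
    'sgd_momentum': 0.9,
    'weight_decay': 3e-4,
    'nesterov': True,
    'lr_scheduler': 'Cosine',
    'drop_path_prob': 0.2,
    'grad_clip_value': 5,
}
result = worker.compute(config, budget=10)  # hypothetical worker instance
print(result['loss'])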
Example #6
def create_run_ensemble(model_description,
                        ensemble_config,
                        seed=get('seed'),
                        num_epochs=20,
                        data_dir='./data',
                        init_channels=get('init_channels'),
                        batch_size=get('batch_size'),
                        genotype_class='PCDARTS',
                        data_augmentations=None,
                        save_model_str=None):

    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    gpu = 'cuda:0'
    np.random.seed(seed)
    torch.cuda.set_device(gpu)
    cudnn.benchmark = True
    torch.manual_seed(seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(seed)
    logging.info('gpu device = %s' % gpu)

    if data_augmentations is None:
        # You can add any preprocessing/data augmentation you want here
        data_augmentations = transforms.ToTensor()
    elif isinstance(data_augmentations, list):
        data_augmentations = transforms.Compose(data_augmentations)
    elif not isinstance(data_augmentations, transforms.Compose):
        raise NotImplementedError

    train_dataset = K49(data_dir, True, data_augmentations)
    test_dataset = K49(data_dir, False, data_augmentations)
    # train_dataset = KMNIST(data_dir, True, data_augmentations)
    # test_dataset = KMNIST(data_dir, False, data_augmentations)
    # Make data batch iterable
    # Could modify the sampler to not uniformly random sample

    train_loader = DataLoader(dataset=train_dataset,
                              batch_size=batch_size,
                              shuffle=True)
    test_loader = DataLoader(dataset=test_dataset,
                             batch_size=batch_size,
                             shuffle=False)

    genotype = getattr(genotypes, genotype_class)
    trained_models = []
    for i, model_state in enumerate(model_description.keys()):
        model = Network(
            init_channels, train_dataset.n_classes,
            model_description[model_state]['config']['n_conv_layers'],
            genotype)
        model.load_state_dict(
            torch.load(model_description[model_state]['model_path']))
        model.cuda()
        model.drop_path_prob = model_description[model_state]['config'][
            'drop_path_prob']
        trained_models.append(model)

    ensemble_model = EnsembleModel(trained_models,
                                   dense_units=ensemble_config['dense_units'],
                                   out_size=train_dataset.n_classes)
    ensemble_model = ensemble_model.cuda()

    summary(ensemble_model, input_size=(1, 28, 28))
    criterion = torch.nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    if ensemble_config['optimizer'] == 'sgd':
        optimizer = torch.optim.SGD(
            ensemble_model.parameters(),
            lr=ensemble_config['initial_lr'],
            momentum=ensemble_config['sgd_momentum'],
            weight_decay=ensemble_config['weight_decay'],
            nesterov=ensemble_config['nesterov'])
    else:
        optimizer = get('opti_dict')[ensemble_config['optimizer']](
            ensemble_model.parameters(),
            lr=ensemble_config['initial_lr'],
            weight_decay=ensemble_config['weight_decay'])

    if ensemble_config['lr_scheduler'] == 'Cosine':
        lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizer, num_epochs)
    elif ensemble_config['lr_scheduler'] == 'Exponential':
        lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer,
                                                              gamma=0.1)

    print('Started Training')
    for epoch in range(num_epochs):
        logging.info('epoch %d lr %e', epoch, lr_scheduler.get_lr()[0])
        ensemble_model.drop_path_prob = ensemble_config['drop_path_prob'] * epoch / num_epochs
        for p in ensemble_model.model_1.parameters():
            p.requires_grad = False
        for p in ensemble_model.model_2.parameters():
            p.requires_grad = False
        for p in ensemble_model.model_3.parameters():
            p.requires_grad = False
        for p in ensemble_model.out_classifier.parameters():
            p.requires_grad = True
        train_acc, train_obj, models_avg = ensemble_train(
            train_loader,
            ensemble_model,
            criterion,
            optimizer,
            grad_clip=ensemble_config['grad_clip_value'])
        logging.info('train_acc %f', train_acc)
        logging.info('models_avg {}'.format(models_avg))
        lr_scheduler.step()

        test_acc, test_obj, models_avg = ensemble_infer(
            test_loader, ensemble_model, criterion)
        logging.info('test_acc %f', test_acc)
        logging.info('models_avg {}'.format(models_avg))

    if save_model_str:
        # Save the model checkpoint, can be restored via "model = torch.load(save_model_str)"
        if not os.path.exists(save_model_str):
            os.mkdir(save_model_str)
        os.path.join(save_model_str, 'ENSEMBLE')

        torch.save(ensemble_model.state_dict(),
                   os.path.join(save_model_str, time.ctime()))
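A sketch of the two inputs, inferred from the keys the function accesses (the training loop freezes model_1, model_2 and model_3, so three base models are assumed); all paths and values are placeholders.

base_cfg = {'n_conv_layers': 8, 'drop_path_prob': 0.2}  # placeholder hyperparameters
model_description = {
    'model_1': {'model_path': './models/model_1.pt', 'config': dict(base_cfg)},
    'model_2': {'model_path': './models/model_2.pt', 'config': dict(base_cfg)},
    'model_3': {'model_path': './models/model_3.pt', 'config': dict(base_cfg)},
}
ensemble_config = {
    'dense_units': 128, 'optimizer': 'sgd', 'initial_lr': 0.025,
    'sgd_momentum': 0.9, 'weight_decay': 3e-4, 'nesterov': True,
    'lr_scheduler': 'Cosine', 'drop_path_prob': 0.2, 'grad_clip_value': 5,
}
create_run_ensemble(model_description, ensemble_config,
                    num_epochs=20, save_model_str='./ensemble_ckpt')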
Example #7
def set_infer_cfg(cfg):
    # config
    test_cfg = cfg.test_cfg
    test_transform = test_cfg.test_transform
    processes_num = test_cfg.setdefault('processes_num', 4)
    pool = Pool(processes=processes_num)  # the number of processes may need tuning
    device = test_cfg.setdefault('device', 'cuda:0')
    # device_available = test_cfg.setdefault('device_available', ['cuda:0'])
    is_model_half = test_cfg.setdefault('is_model_half', False)
    # weights used when combining the ensembled models
    ensemble_weight = test_cfg.setdefault(
        'ensemble_weight',
        [1.0 / len(test_cfg.check_point_file)] * len(test_cfg.check_point_file))
    if len(ensemble_weight) != len(test_cfg.check_point_file):
        raise Exception('Wrong number of ensemble weights!')

    # TensorRT-related options
    is_trt_infer = test_cfg.setdefault('is_trt_infer', False)
    FLOAT = test_cfg.setdefault('FLOAT', 32)

    # parameters for plain torch inference
    models = []
    device_list = []  # a parallel execution strategy may be needed here
    models_num = 0

    # parameters for TRT inference
    context = None
    inputs = None
    outputs = None
    bindings = None
    stream = None

    if is_trt_infer:  # TRT inference; currently only a single (merged) model is supported
        # convert the checkpoints into a TRT engine
        TRT_LOGGER = trt.Logger(min_severity=Logger.ERROR)
        print("Converting the individual models into an ensemble model...")
        print(test_cfg.check_point_file)
        ensemble_model = EnsembleModel(
            check_point_file_list=test_cfg.check_point_file, device=device)
        if not os.path.exists('../user_data/checkpoint/ensemble'):
            os.system("mkdir ../user_data/checkpoint/ensemble")
        ensemble_ckpt_file = '../user_data/checkpoint/ensemble/model.pth'
        trt_file = '../user_data/checkpoint/ensemble/model.trt'
        if not os.path.exists(trt_file):
            torch.save(ensemble_model, ensemble_ckpt_file)
            torch2trt(ckpt_path=ensemble_ckpt_file, FLOAT=FLOAT)

        # load the TRT engine
        _ = torch.ones(1).cuda()  # unused; it only initializes CUDA, otherwise an error is raised
        # Build an engine
        print(trt_file)
        with open(trt_file, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime:
            engine = runtime.deserialize_cuda_engine(f.read())
        # Create the context for this engine
        context = engine.create_execution_context()
        # Allocate buffers for input and output
        inputs, outputs, bindings, stream = allocate_buffers(
            engine)  # input, output: host # bindings

    else:  # plain torch inference
        # load each model checkpoint
        for ckpt in test_cfg.check_point_file:
            # device = device_available[models_num % len(device_available)]
            model = torch.load(ckpt, map_location=device)
            print(ckpt)
            # model_cfg = cfg.model_cfg
            # model = build_model(model_cfg).to(device)
            if isinstance(model, torch.nn.DataParallel):  # if trained with DataParallel, unwrap .module
                model = model.module
            model.eval()
            models.append(model)
            device_list.append(device)
            models_num += 1

    # build the inference config dict
    infer_cfg = Dict({
        'models': models,
        'models_num': models_num,
        # 'device_list': device_list,
        'device': device,
        'test_transform': test_transform,
        'ensemble_weight': ensemble_weight,
        'pool': pool,  # process pool
        'is_model_half': is_model_half,  # whether to run inference in half precision (float16)

        # TRT-related parameters
        'is_trt_infer': is_trt_infer,
        'context': context,
        'inputs': inputs,
        'outputs': outputs,
        'bindings': bindings,
        'stream': stream,
        'FLOAT': FLOAT,
    })
    return infer_cfg
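A minimal config sketch covering only the fields set_infer_cfg touches; it assumes an addict-style Dict config (the same Dict used for the return value) and placeholder checkpoint paths.

from addict import Dict  # assumption: the project's cfg behaves like addict's Dict

cfg = Dict()
cfg.test_cfg.test_transform = None  # e.g. a torchvision/albumentations transform
cfg.test_cfg.check_point_file = ['../user_data/checkpoint/fold0.pth',
                                 '../user_data/checkpoint/fold1.pth']
cfg.test_cfg.is_trt_infer = False   # take the plain torch inference path
infer_cfg = set_infer_cfg(cfg)
print(infer_cfg.models_num, infer_cfg.ensemble_weight)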
Example #8
def eval(args):
    mir1k_sr = 16000
    n_fft = 1024
    hop_length = n_fft // 4
    num_rnn_layer = 3
    num_hidden_units = args['hidden_size']
    checkpoint = torch.load("model_10000.pth")

    mir1k_dir = 'data/MIR1K/MIR-1K'
    test_path = os.path.join(mir1k_dir, 'test_temp.json')
    # test_path = os.path.join(mir1k_dir, 'MIR-1K_test.json')

    with open(test_path, 'r') as text_file:
        content = json.load(text_file)
        # content = text_file.readlines()
    # wav_filenames = [file.strip() for file in content] 
    wav_filenames = ["{}/{}".format("data/MIR1K/MIR-1K/Wavfile", f) for f in content]
    print(len(wav_filenames))
    split_size = int(len(wav_filenames)/5.)
    model = EnsembleModel(n_fft // 2 , 512).to(device)
    model.load_state_dict(checkpoint["model_state_dict"])
    wavs_src1_pred = list()
    wavs_src2_pred = list()
    model.eval()
    step = 1
    for i in range(5):
        start = i*split_size
        wavs_mono, wavs_src1, wavs_src2 = load_wavs(filenames = wav_filenames[start:start+split_size], sr = mir1k_sr)

        stfts_mono, stfts_src1, stfts_src2 = wavs_to_specs(
            wavs_mono = wavs_mono, wavs_src1 = wavs_src1, wavs_src2 = wavs_src2, n_fft = n_fft, hop_length = hop_length)
        stfts_mono_full, stfts_src1_full, stfts_src2_full = prepare_data_full(stfts_mono = stfts_mono, stfts_src1 = stfts_src1, stfts_src2 = stfts_src2)
        # print(len(stfts_mono_full))
        with torch.no_grad():
            for wav_filename, wav_mono, stft_mono_full in zip(wav_filenames, wavs_mono, stfts_mono_full):
                # print(stft_mono_full.shape)
                stft_mono_magnitude, stft_mono_phase = separate_magnitude_phase(data = stft_mono_full)
                max_length_even = stft_mono_magnitude.shape[0]-1 if (stft_mono_magnitude.shape[0]%2 != 0) else stft_mono_magnitude.shape[0]
                stft_mono_magnitude = np.array([stft_mono_magnitude[:max_length_even,:512]])
                # print(stft_mono_magnitude.shape)
                stft_mono_magnitude = torch.Tensor(stft_mono_magnitude).to(device)

                orig_length = max_length_even
                # reminder = np.floor(orig_length / 64)
                # print(64*reminder)
                startIdx = 0
                y1_pred_list = np.zeros((orig_length, 512), dtype=np.float32) # (batch, 512, 64)
                y2_pred_list = np.zeros((orig_length, 512), dtype=np.float32)
                while startIdx+64 < orig_length:
                    y1_pred, y2_pred = model(stft_mono_magnitude[:, startIdx: startIdx+64, :])

                # ISTFT with the phase from mono
                    y1_pred = y1_pred.cpu().numpy()
                    y2_pred = y2_pred.cpu().numpy()
                    y1_pred_list[startIdx: startIdx+64, :] = y1_pred[0]
                    y2_pred_list[startIdx: startIdx+64, :] = y2_pred[0]

                    startIdx += 64
                # calculate things outside of the 64-frame blocks
                # y1_pred, y2_pred = model(stft_mono_magnitude[:, startIdx: orig_length, :])

                # y1_pred = y1_pred.cpu().numpy()
                # y2_pred = y2_pred.cpu().numpy()
                # y1_pred_list[startIdx: orig_length, :] = y1_pred[0]
                # y2_pred_list[startIdx: orig_length, :] = y2_pred[0]


                y1_stft_hat = combine_magnitdue_phase(magnitudes = y1_pred_list[:(startIdx),:], phases = stft_mono_phase[:(startIdx), :512])
                y2_stft_hat = combine_magnitdue_phase(magnitudes = y2_pred_list[:(startIdx),:], phases = stft_mono_phase[:(startIdx), :512])

                y1_stft_hat = y1_stft_hat.transpose()
                y2_stft_hat = y2_stft_hat.transpose()

                y1_hat = librosa.istft(y1_stft_hat, hop_length = hop_length)
                y2_hat = librosa.istft(y2_stft_hat, hop_length = hop_length)


                wavs_src1_pred.append(y1_hat)
                wavs_src2_pred.append(y2_hat)
                print("{}/{}\n".format(step, len(wav_filenames)))
                step += 1
    wavs_mono, wavs_src1, wavs_src2 = load_wavs(filenames = wav_filenames, sr = mir1k_sr)
    gnsdr, gsir, gsar = bss_eval_global(wavs_mono = wavs_mono, wavs_src1 = wavs_src1, wavs_src2 = wavs_src2, wavs_src1_pred = wavs_src1_pred, wavs_src2_pred = wavs_src2_pred)

    print('GNSDR:', gnsdr)
    print('GSIR:', gsir)
    print('GSAR:', gsar)
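A hypothetical invocation: it assumes model_10000.pth and the MIR-1K files referenced above exist and that device is defined at module level, as in the snippet.

eval({'hidden_size': 1024})  # only args['hidden_size'] is read; the value is a placeholder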
Example #9
    init_logging(
        os.path.join(opts.log_dir, 'log_{:s}.txt'.format(opts.task)))
    product_recog = iMateriaList(opts)
    # product_recog.test_ensemble(model_file='./log/nasnetalarge/fusesize_img_331_2/ep7.pkl')
    # product_recog.test_ensemble(model_file='./log/resnet101_c/fusesize_img_224_1/ep11.pkl')
    # product_recog.test_ensemble(model_file='./log/inceptionresnetv2/fusesize_img_299_2/ep1.pkl')
    # product_recog.test_ensemble(model_file='./log/inceptionresnetv2/fusesize_img_299_1/ep12.pkl')
    # product_recog.test_ensemble(model_file='./log/resnet152/fusesize_img_224_1/ep29.pkl')
    product_recog.test_ensemble(
        model_file='./log/senet154/fusesize_img_224_1/ep6.pkl')
    # product_recog.test_ensemble(model_file='./log/senet154/resize_img_224_1/ep11.pkl')
    # product_recog.test_ensemble(model_file='./log/resnet152/resize_img_224_1/ep5.pkl')
    # product_recog.test_ensemble(model_file='./log/ep19.pkl.new')
    # product_recog.test(model_file='./log/resnet152/resize_img_bin/ep7.pkl')
elif opts.task == 'feature':
    init_logging(
        os.path.join(opts.log_dir, 'log_{:s}.txt'.format(opts.task)))
    product_recog = iMateriaList(opts)
    product_recog.extract_feature(
        model_file=os.path.join(opts.log_dir, 'ep1.pkl'))
    # product_recog.extract_feature(model_file='./log/nasnetalarge/fusesize_img_331_2/ep7.pkl')
    # product_recog.extract_feature(model_file='./log/senet154/fusesize_img_224_1/ep6.pkl')
    # product_recog.extract_feature(model_file='./log/resnet101_c/fusesize_img_224_1/ep11.pkl')
elif opts.task == 'ensemble':
    ensemble = EnsembleModel()
    # ensemble.fuse_two('./log/ensemble')
    ensemble.fuse_probs('./log/ensemble/fuse')
    # ensemble.fuse('./log/ensemble/val')
    # ensemble.fuse_two_kinds('./log/ensemble/fuse')
else:
    print('Run script with --task.')