Example #1
    def test_load(self):
        a, _ = fluid.load_dygraph('results/YOUR_DATASET_NAME_genA2B_params_latest.pt')

        b, _ = fluid.load_dygraph('results/YOUR_DATASET_NAME_genB2A_params_latest.pt')

        self.genA2B.load_dict(a)
        self.genB2A.load_dict(b)
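All of the examples on this page use the legacy PaddlePaddle 1.x dygraph API: fluid.load_dygraph(path) returns a (parameter_dict, optimizer_dict) tuple, and Layer.load_dict (or set_dict) applies the parameter dict to a network. A minimal, self-contained round trip is sketched below; the SimpleNet layer and the 'simple_net' prefix are illustrative and not taken from any example on this page.

import numpy as np
import paddle.fluid as fluid


class SimpleNet(fluid.dygraph.Layer):
    def __init__(self):
        super(SimpleNet, self).__init__()
        self.fc = fluid.dygraph.Linear(4, 2)

    def forward(self, x):
        return self.fc(x)


with fluid.dygraph.guard():
    net = SimpleNet()
    # Writes the parameters to 'simple_net.pdparams'.
    fluid.save_dygraph(net.state_dict(), 'simple_net')

    # load_dygraph returns (parameter_dict, optimizer_dict); the optimizer
    # dict is None here because only parameters were saved.
    param_dict, opt_dict = fluid.load_dygraph('simple_net')
    net.load_dict(param_dict)

    x = fluid.dygraph.to_variable(np.ones((1, 4), dtype='float32'))
    print(net(x).numpy())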
Example #2
    def test_load(self, dir, step):
        pa = os.path.join(dir, self.dataset + 'genA2B_' + str(step))
        a, _ = fluid.load_dygraph(pa)

        pb = os.path.join(dir, self.dataset + 'genB2A_' + str(step))
        b, _ = fluid.load_dygraph(pb)

        self.genA2B.load_dict(a)
        self.genB2A.load_dict(b)
Example #3
    def test_load(self):
        pa = 'results\\selfie2anime\\model\\genA2B_20000.pdparams'
        a, _ = fluid.load_dygraph(pa)

        pb = 'results\\selfie2anime\\model\\genB2A_20000.pdparams'
        b, _ = fluid.load_dygraph(pb)

        self.genA2B.load_dict(a)
        self.genB2A.load_dict(b)
Example #4
def main():
    global args, best_mIoU
    args = parser.parse_args()

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    if args.dataset == 'LaneDet':
        num_class = 20
    else:
        raise ValueError('Unknown dataset ' + args.dataset)

    # get places
    places = fluid.cuda_places()

    with fluid.dygraph.guard():
        model = models.ERFNet(num_class, [576, 1024])
        input_mean = model.input_mean
        input_std = model.input_std

        if args.resume:
            print(("=> loading checkpoint '{}'".format(args.resume)))
            checkpoint, _ = fluid.load_dygraph(args.resume)
            model.load_dict(checkpoint)
            print("=> checkpoint loaded successfully")
        else:
            print(("=> loading checkpoint '{}'".format('trained/ERFNet_trained')))
            checkpoint, _ = fluid.load_dygraph('trained/ERFNet_trained')
            model.load_dict(checkpoint)
            print("=> default checkpoint loaded successfully")

        # Data loading code
        test_dataset = ds.LaneDataSet(
            dataset_path='datasets/PreliminaryData',
            data_list=args.val_list,
            transform=[
                lambda x: cv2.resize(x, (1024, 576)),
                lambda x: (x - np.asarray(input_mean)[None, None, :]) / np.array(input_std)[None, None, :],
            ]
        )

        test_loader = DataLoader(
            test_dataset,
            places=places[0],
            batch_size=1,
            shuffle=False,
            num_workers=args.workers,
            collate_fn=collate_fn
        )

        ### evaluate ###
        mIoU = validate(test_loader, model)
        # print('mIoU: {}'.format(mIoU))
    return
Example #5
    def load(self, dir, step):
        genA2B, _ = fluid.load_dygraph(
            os.path.join(dir, "{}/genA2B".format(step)))
        genB2A, _ = fluid.load_dygraph(
            os.path.join(dir, "{}/genB2A".format(step)))
        disGA, _ = fluid.load_dygraph(
            os.path.join(dir, "{}/disGA".format(step)))
        disGB, _ = fluid.load_dygraph(
            os.path.join(dir, "{}/disGB".format(step)))
        disLA, _ = fluid.load_dygraph(
            os.path.join(dir, "{}/disLA".format(step)))
        disLB, _ = fluid.load_dygraph(
            os.path.join(dir, "{}/disLB".format(step)))
        _, D_optim = fluid.load_dygraph(
            os.path.join(dir, "{}/D_optim".format(step)))
        _, G_optim = fluid.load_dygraph(
            os.path.join(dir, "{}/G_optim".format(step)))
        self.genA2B.load_dict(genA2B)
        self.genB2A.load_dict(genB2A)
        self.disGA.load_dict(disGA)
        self.disGB.load_dict(disGB)
        self.disLA.load_dict(disLA)
        self.disLB.load_dict(disLB)
        self.G_optim.set_dict(G_optim)
        self.D_optim.set_dict(D_optim)
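Example #5 also restores optimizer state: the second element of the tuple returned by fluid.load_dygraph is the optimizer state dict, applied with Optimizer.set_dict. Below is a minimal sketch of the full save/load cycle for parameters plus optimizer state; the Linear layer, Adam optimizer and 'example_ckpt' prefix are illustrative.

import numpy as np
import paddle.fluid as fluid

with fluid.dygraph.guard():
    net = fluid.dygraph.Linear(4, 2)
    adam = fluid.optimizer.AdamOptimizer(learning_rate=1e-3,
                                         parameter_list=net.parameters())

    # One step so the optimizer has state worth saving.
    loss = fluid.layers.reduce_mean(net(fluid.dygraph.to_variable(
        np.ones((1, 4), dtype='float32'))))
    loss.backward()
    adam.minimize(loss)

    # save_dygraph picks the suffix from the dict it is given:
    # .pdparams for a parameter dict, .pdopt for an optimizer state dict.
    fluid.save_dygraph(net.state_dict(), 'example_ckpt')
    fluid.save_dygraph(adam.state_dict(), 'example_ckpt')

    param_dict, opt_dict = fluid.load_dygraph('example_ckpt')
    net.load_dict(param_dict)
    adam.set_dict(opt_dict)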
Example #6
    def load_paddle_weights(self, weights_path):
        import paddle.fluid as fluid
        with fluid.dygraph.guard():
            para_state_dict, opti_state_dict = fluid.load_dygraph(weights_path)

        for k, v in self.net.state_dict().items():
            keyword = 'stages.'
            if keyword in k:
                # replace: 'stages.{}.' -> ''
                start_id = k.find(keyword)
                end_id = start_id + len(keyword) + 1 + 1
                name = k.replace(k[start_id:end_id], '')
            else:
                name = k

            if name.endswith('num_batches_tracked'):
                continue

            if name.endswith('running_mean'):
                ppname = name.replace('running_mean', '_mean')
            elif name.endswith('running_var'):
                ppname = name.replace('running_var', '_variance')
            elif name.endswith('bias') or name.endswith('weight'):
                ppname = name
            else:
                print('Redundance:')
                print(name)
                raise ValueError

            self.net.state_dict()[k].copy_(
                torch.Tensor(para_state_dict[ppname]))
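This loader and the similar ones later on this page hand-roll the same PyTorch-to-Paddle BatchNorm name mapping. A small standalone helper capturing just that mapping is sketched below; the helper name is hypothetical and not part of any example.

def torch_key_to_paddle_key(name):
    """Map a PyTorch state_dict key to its fluid dygraph counterpart.

    Returns None for keys with no Paddle equivalent (BatchNorm's
    num_batches_tracked buffer is simply skipped).
    """
    if name.endswith('num_batches_tracked'):
        return None
    if name.endswith('running_mean'):
        return name.replace('running_mean', '_mean')
    if name.endswith('running_var'):
        return name.replace('running_var', '_variance')
    return name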
Example #7
    def load_paddle_weights(self, weights_path):
        raise NotImplementedError('implemented in converter.')
        print('paddle weights loading...')
        import paddle.fluid as fluid
        with fluid.dygraph.guard():
            para_state_dict, opti_state_dict = fluid.load_dygraph(weights_path)

        for k,v in self.net.state_dict().items():
            name = k

            if name.endswith('num_batches_tracked'):
                continue

            if name.endswith('running_mean'):
                ppname = name.replace('running_mean', '_mean')
            elif name.endswith('running_var'):
                ppname = name.replace('running_var', '_variance')
            elif name.endswith('bias') or name.endswith('weight'):
                ppname = name

            else:
                print('Redundance:')
                print(name)
                raise ValueError
            try:
                if ppname.endswith('fc.weight'):
                    self.net.state_dict()[k].copy_(torch.Tensor(para_state_dict[ppname].T))
                else:
                    self.net.state_dict()[k].copy_(torch.Tensor(para_state_dict[ppname]))
            except Exception as e:
                print('pytorch: {}, {}'.format(k, v.size()))
                print('paddle: {}, {}'.format(ppname, para_state_dict[ppname].shape))
                raise e

        print('model is loaded: {}'.format(weights_path))
Example #8
def test_model():
    with fluid.dygraph.guard():
        print('start evaluation .......')
        # Load the model parameters
        model = MNIST("mnist")
        model_state_dict, _ = fluid.load_dygraph('mnist')
        model.load_dict(model_state_dict)

        model.eval()
        eval_loader = load_data('eval')

        acc_set = []
        avg_loss_set = []
        for batch_id, data in enumerate(eval_loader()):
            x_data, y_data = data
            img = fluid.dygraph.to_variable(x_data)
            label = fluid.dygraph.to_variable(y_data)
            prediction, acc = model(img, label)
            loss = fluid.layers.cross_entropy(input=prediction, label=label)
            avg_loss = fluid.layers.mean(loss)
            acc_set.append(float(acc.numpy()))
            avg_loss_set.append(float(avg_loss.numpy()))

        # Compute the average loss and accuracy over all batches
        acc_val_mean = np.array(acc_set).mean()
        avg_loss_val_mean = np.array(avg_loss_set).mean()

        print('loss={}, acc={}'.format(avg_loss_val_mean, acc_val_mean))
        record_result(avg_loss_val_mean, acc_val_mean)
Example #9
def eval():
    with fluid.dygraph.guard():
        print('start evaluation .......')
        # Load the model parameters
        model = ConvolutionNatualNetwork()
        model_state_dict, _ = fluid.load_dygraph('cnn')
        model.load_dict(model_state_dict)

        model.eval()
        train_loader = paddle.batch(paddle.dataset.mnist.train(),
                                    batch_size=50)
        acc_set = []
        avg_loss_set = []
        for batch_id, data in enumerate(train_loader()):
            image_data = np.array([x[0] for x in data]).astype('float32')
            label_data = np.array([x[1] for x in data
                                   ]).astype('int64').reshape(-1, 1)
            img = fluid.dygraph.to_variable(image_data)
            label = fluid.dygraph.to_variable(label_data)
            prediction, acc = model(img, label)
            loss = fluid.layers.cross_entropy(input=prediction, label=label)
            avg_loss = fluid.layers.mean(loss)
            acc_set.append(float(acc.numpy()))
            avg_loss_set.append(float(avg_loss.numpy()))

        # Compute the average loss and accuracy over all batches
        acc_val_mean = np.array(acc_set).mean()
        avg_loss_val_mean = np.array(avg_loss_set).mean()

        print('loss={}, acc={}'.format(avg_loss_val_mean, acc_val_mean))
Example #10
def use_model():
    global model

    # Read the image to be predicted
    def load_image(path):
        img = Image.open(path)
        img = img.resize((100, 100), Image.ANTIALIAS)
        img = np.array(img).astype('float32')
        img = img.transpose((2, 0, 1))
        img = img / 255.0
        print(img.shape)
        return img

    # Build the dygraph inference pass
    with fluid.dygraph.guard():
        data_path = 'data/Dataset'
        infer_path = data_path + '/手势.JPG'
        model = MyDNN()  # instantiate the model
        model_dict, _ = fluid.load_dygraph('MyDNN')
        model.load_dict(model_dict)  # load the model parameters
        model.eval()  # evaluation mode
        infer_img = load_image(infer_path)
        infer_img = np.array(infer_img).astype('float32')
        infer_img = infer_img[np.newaxis, :, :, :]
        infer_img = fluid.dygraph.to_variable(infer_img)
        result = model(infer_img)
        # display(Image.open('手势.JPG'))
        print(np.argmax(result.numpy()))
Example #11
    def func_testOnlyLoadParams(self):
        with fluid.dygraph.guard():
            emb = fluid.dygraph.Embedding([10, 10])
            state_dict = emb.state_dict()
            fluid.save_dygraph(state_dict, os.path.join('saved_dy', 'emb_dy'))

            para_state_dict, opti_state_dict = fluid.load_dygraph(
                os.path.join('saved_dy', 'emb_dy'))

            self.assertTrue(opti_state_dict == None)

            para_state_dict, opti_state_dict = fluid.load_dygraph(
                os.path.join('saved_dy', 'emb_dy.pdparams'))

            para_state_dict, opti_state_dict = fluid.load_dygraph(
                os.path.join('saved_dy', 'emb_dy.pdopt'))
Example #12
def test():
    """main train"""
    args = parse_args()
    # load config
    conf = edict(pickle_read(args.conf_path))
    conf.pretrained = None

    results_path = os.path.join('output', 'tmp_results', 'data')
    # make directory
    mkdir_if_missing(results_path, delete_if_exist=True)

    with fluid.dygraph.guard(fluid.CUDAPlace(0)):
        # training network
        src_path = os.path.join('.', 'models', conf.model + '.py')
        train_model = absolute_import(src_path)
        train_model = train_model.build(conf, args.backbone, 'train')
        train_model.eval()
        train_model.phase = "eval"
        Already_trained, _ = fluid.load_dygraph(args.weights_path)
        print("loaded model from ", args.weights_path)
        train_model.set_dict(Already_trained)  #, use_structured_name=True)
        print("start evaluation...")
        test_kitti_3d(conf.dataset_test, train_model, conf, results_path,
                      args.data_dir)
    print("Evaluation Finished!")
Example #13
def eval1(model_path, test_reader, method):
    
    #method = train_parameters['method']
    #model_path = ''
    with fluid.dygraph.guard():
        
		print("CSR")
		net = CSRNet("CSR")    
        model_dict, _ = fluid.load_dygraph(model_path)
        net.load_dict(model_dict)
        net.eval()
        print('start eval!')
  
        mae=0
        mse = 0
        val_loss = 0
        for batch_id, data in enumerate(test_reader()):
            image = np.array([x[0] for x in data]).astype('float32')
            label = np.array([x[1] for x in data]).astype('float32')
            
            image = fluid.dygraph.to_variable(image)
            label = fluid.dygraph.to_variable(label)
            label.stop_gradient = True
            predict = net(image)
            loss = mse_loss(predict, label)
            val_loss += loss
            mae+=abs(predict.numpy().sum()-label.numpy().sum())
            mse += (predict.numpy().sum()-label.numpy().sum())*(predict.numpy().sum()-label.numpy().sum())
            if batch_id % 99 ==0:
                print(batch_id, 'predict:', predict.numpy().sum(), 'real:', label.numpy().sum())
        
        print('counts:', batch_id+1, 'loss:',val_loss.numpy()[0], 'avg_loss', val_loss.numpy()[0] / (batch_id+1), "mae:", str(mae/(batch_id+1)), 'mse:', mse/(batch_id+1))
        print('real:', label.numpy().sum(), 'predict:', predict.numpy().sum())
Example #14
def main():
    data_path = "../../data/train/before/LINE_100_dbdt.dat"

    with fluid.dygraph.guard():
        model = AlexNet()
        # Load the saved state dict
        min_dict, _ = fluid.load_dygraph(model_path='min_polyfit')
        # print(min_dict)
        model.set_dict(stat_dict=min_dict)

        model.eval()

        data_file = SingleFile(data_path)
        one_point = data_file.get_one_point()

        data = one_point.get_data()

        data = np.array(data, 'float32').reshape(1, 2, 1, 100)
        # teacher.res
        data = fluid.dygraph.to_variable(data)

        logits = model(data)

        result = logits.numpy()

        result = back_change(result)

    x_data = one_point.x
    print("RESULT: \n", result)
    one_point.plot(show=False, label='origin')
    plt.plot(x_data, [exponenial_func(x, *result[0]) for x in x_data], label='predict')
    plt.show()
Example #15
def generate_images():

    with fluid.dygraph.guard():
        G = Generator()
        pretrain_file = os.path.join(args.checkpoint_folder, f"G_{args.net}-Epoch-{args.num_epochs}")
        print(pretrain_file)
        if os.path.exists(pretrain_file):
            state, _ = fluid.load_dygraph(pretrain_file)
            G.set_dict(state)

        image_folder = ""
        image_list_file = "dummy_data/fabric_list.txt"

        transform = Transform()

        data = DataLoader(image_folder,image_list_file,transform=transform)
        dataloader = fluid.io.DataLoader.from_generator(capacity=2, return_list=True)
        dataloader.set_sample_generator(data,1)

        G.eval()
        for i in range(20):
            z = np.random.rand(1,64)
            z = to_variable(z)
            z = fluid.layers.cast(z,dtype='float32')
            fake_x = G(z)
            result = np.squeeze(fake_x.numpy()[0])
            result_min= np.min(result)
            result_range = np.max(result)-np.min(result)
            result = (result-result_min)/result_range*255
            o_file = f"{args.checkpoint_folder}/gen_{i}.png"
            cv2.imwrite(o_file,result)
Example #16
def do_eval(data_path, model_name='mymodel', use_gpu=False):
    place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
    with fluid.dygraph.guard(place):
        model = MyNet()
        model_state_dict, _ = fluid.load_dygraph(
            os.path.join(MODEL_PATH, model_name))
        model.load_dict(model_state_dict)

        model.eval()
        eval_loader = load_data(data_path, mode='eval')

        avg_acc_set = []
        avg_loss_set = []
        for _, data in enumerate(eval_loader()):
            x_data, y_data = data
            img = fluid.dygraph.to_variable(x_data)
            label = fluid.dygraph.to_variable(y_data)
            predict, avg_acc = model(img, label)
            loss = fluid.layers.cross_entropy(input=predict, label=label)
            avg_loss = fluid.layers.mean(loss)
            avg_acc_set.append(float(avg_acc.numpy()))
            avg_loss_set.append(float(avg_loss.numpy()))

        # Compute the average loss and accuracy over all batches
        avg_acc_val_mean = np.array(avg_acc_set).mean()
        avg_loss_val_mean = np.array(avg_loss_set).mean()

        print('loss={}, acc={}'.format(avg_loss_val_mean, avg_acc_val_mean))
Example #17
def game(model, params_file_path=None):
    total = 0
    correct = 0
    print('loading model .......')
    with fluid.dygraph.guard():
        # Load the model parameters
        model_state_dict, _ = fluid.load_dygraph(params_file_path)
        model.load_dict(model_state_dict)
        print('model loaded')

        model.eval()
        while True:
            number = str(random.randint(1000, 9999))
            print('Ground truth: {}'.format(number))
            total = total + 1
            img = generate_image(number)
            t_img = np.array([transform_img(img)])
            show_img(t_img[0])
            v_img = fluid.dygraph.to_variable(t_img)
            prediction = model(v_img)
            predicted_num = []
            for v in prediction:
                tmp = fluid.layers.argmax(x=v, axis=-1)
                tmp = str(tmp.numpy().tolist()[0])
                predicted_num.append(tmp)
            predicted_str = ''.join(predicted_num)
            print('Predicted: {}'.format(predicted_str))
            if predicted_str == number:
                correct = correct + 1
                print('Model prediction correct!')
            else:
                print('Model prediction wrong..')
            print('Total: {} runs, accuracy: {:.3}\n'.format(total, correct / total))
Example #18
def load_pretrained_model(model, pretrained_model):
    if pretrained_model is not None:
        logger.info('Load pretrained model from {}'.format(pretrained_model))
        if os.path.exists(pretrained_model):
            ckpt_path = os.path.join(pretrained_model, 'model')
            try:
                para_state_dict, _ = fluid.load_dygraph(ckpt_path)
            except Exception:
                para_state_dict = fluid.load_program_state(pretrained_model)

            model_state_dict = model.state_dict()
            keys = model_state_dict.keys()
            num_params_loaded = 0
            for k in keys:
                if k not in para_state_dict:
                    logger.warning("{} is not in pretrained model".format(k))
                elif list(para_state_dict[k].shape) != list(
                        model_state_dict[k].shape):
                    logger.warning(
                        "[SKIP] Shape of pretrained params {} doesn't match.(Pretrained: {}, Actual: {})"
                        .format(k, para_state_dict[k].shape,
                                model_state_dict[k].shape))
                else:
                    model_state_dict[k] = para_state_dict[k]
                    num_params_loaded += 1
            model.set_dict(model_state_dict)
            logger.info("There are {}/{} varaibles are loaded.".format(
                num_params_loaded, len(model_state_dict)))

        else:
            raise ValueError(
                'The pretrained model directory is not Found: {}'.format(
                    pretrained_model))
    else:
        logger.info('No pretrained model to load, train from scratch')
Example #19
def evaluation(model, params_file_path):
    use_gpu = False
    place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()

    with fluid.dygraph.guard(place):
        model_state_dict, _ = fluid.load_dygraph(params_file_path)
        model.load_dict(model_state_dict)
        model.eval()

        acc_set = []
        avg_loss_set = []
        for idx, data in enumerate(model.valid_loader()):
            usr, mov, score_label = data
            usr_v = [dygraph.to_variable(var) for var in usr]
            mov_v = [dygraph.to_variable(var) for var in mov]

            _, _, scores_predict = model(usr_v, mov_v)

            pred_scores = scores_predict.numpy()

            avg_loss_set.append(np.mean(np.abs(pred_scores - score_label)))

            diff = np.abs(pred_scores - score_label)
            diff[diff > 0.5] = 1
            acc = 1 - np.mean(diff)
            acc_set.append(acc)
        return np.mean(acc_set), np.mean(avg_loss_set)
Example #20
    def print_paddle_state_dict(self, weights_path):
        import paddle.fluid as fluid
        with fluid.dygraph.guard():
            para_state_dict, opti_state_dict = fluid.load_dygraph(weights_path)
        print('paddle:')
        for k, v in para_state_dict.items():
            print('{}----{}'.format(k, type(v)))
Example #21
def test(args):
    with fluid.dygraph.guard(place):
        model = getattr(models, config.model_name)()
        model_dict, _ = fluid.load_dygraph(config.model_name + '_best')
        model.load_dict(model_dict)
        model.eval()
        test_loader = load_data('eval')
        data_loader = fluid.io.DataLoader.from_generator(capacity=5,
                                                         return_list=True)
        data_loader.set_batch_generator(test_loader, places=place)

        acc_set = []
        avg_loss_set = []
        for batch_id, data in enumerate(data_loader):
            x_data, y_data = data
            img = fluid.dygraph.to_variable(x_data)
            label = fluid.dygraph.to_variable(y_data)
            prediction, acc = model(img, label)
            loss = fluid.layers.cross_entropy(input=prediction, label=label)
            avg_loss = fluid.layers.mean(loss)
            acc_set.append(float(acc.numpy()))
            avg_loss_set.append(float(avg_loss.numpy()))

        # Compute the average loss and accuracy over all batches
        acc_val_mean = np.array(acc_set).mean()
        avg_loss_val_mean = np.array(avg_loss_set).mean()

        print('loss={}, acc={}'.format(avg_loss_val_mean, acc_val_mean))
Example #22
def to_eval(weight):
    with fluid.dygraph.guard(place = fluid.CUDAPlace(0)):
        processor = SentaProcessor(
                data_dir=r"data/",
                vocab_path=r"../../../chrome/zrbdata/ocr_lstm/lstm/data/dict.txt")
        eval_data_generator = processor.data_generator(
                batch_size=train_parameters["batch_size"],
                phase='eval',
                epoch=train_parameters["epoch"],
                shuffle=True)

        model_eval = CNN()
        model, _ = fluid.load_dygraph(weight)
        model_eval.load_dict(model)
        model_eval.eval()
        total_eval_cost, total_eval_acc = [], []
        for eval_batch_id, eval_data in enumerate(eval_data_generator()):
            eval_np_doc = np.array([np.pad(x[0][0:train_parameters["padding_size"]],
                                    (0, train_parameters["padding_size"] -len(x[0][0:train_parameters["padding_size"]])),
                                    'constant',
                                    constant_values=(train_parameters["vocab_size"]))
                            for x in eval_data
                            ]).astype('int64').reshape(-1)
            eval_label = to_variable(np.array([x[1] for x in eval_data]).astype(
                                    'int64').reshape(train_parameters["batch_size"], 1))
            eval_doc = to_variable(eval_np_doc)
            eval_prediction, eval_acc = model_eval(eval_doc, eval_label)
            loss = fluid.layers.cross_entropy(eval_prediction, eval_label)
            avg_loss = fluid.layers.mean(loss)
            total_eval_cost.append(avg_loss.numpy()[0])
            total_eval_acc.append(eval_acc.numpy()[0])
    print("Final validation result: ave loss: %f, ave acc: %f\n" %(np.mean(total_eval_cost), np.mean(total_eval_acc)))
    return np.mean(total_eval_cost), np.mean(total_eval_acc)
Example #23
def inference_mnist():
    place = fluid.CUDAPlace(fluid.dygraph.parallel.Env().dev_id) \
        if args.use_data_parallel else fluid.CUDAPlace(0)
    with fluid.dygraph.guard(place):
        mnist_infer = MNIST()
        # load checkpoint
        model_dict, _ = fluid.load_dygraph("save_temp")
        mnist_infer.set_dict(model_dict)
        print("checkpoint loaded")

        # start evaluate mode
        mnist_infer.eval()

        def load_image(file):
            im = Image.open(file).convert('L')
            im = im.resize((28, 28), Image.ANTIALIAS)
            im = np.array(im).reshape(1, 1, 28, 28).astype(np.float32)
            im = im / 255.0 * 2.0 - 1.0
            return im

        cur_dir = os.path.dirname(os.path.realpath(__file__))
        tensor_img = load_image(cur_dir + '/image/infer_3.png')

        results = mnist_infer(to_variable(tensor_img))
        lab = np.argsort(results.numpy())
        print("Inference result of image/infer_3.png is: %d" % lab[0][-1])
Example #24
def _get_activations_from_ims(img, model, batch_size, dims, use_gpu,
                              premodel_path):
    n_batches = (len(img) + batch_size - 1) // batch_size
    n_used_img = len(img)

    pred_arr = np.empty((n_used_img, dims))

    for i in tqdm(range(n_batches)):
        start = i * batch_size
        end = start + batch_size
        if end > len(img):
            end = len(img)
        images = img[start:end]
        if images.shape[1] != 3:
            images = images.transpose((0, 3, 1, 2))
        images /= 255

        images = to_variable(images)
        param_dict, _ = fluid.load_dygraph(premodel_path)
        model.set_dict(param_dict)
        model.eval()
        pred = model(images)[0][0]
        pred_arr[start:end] = pred.reshape(end - start, -1)

    return pred_arr
Example #25
def predict(args, cls_model = None):

    bert_config = BertConfig(args.bert_config_path)
    bert_config.print_config()

    task_name = args.task_name.lower()
    processors = {
        'xnli': reader.XnliProcessor,
        'cola': reader.ColaProcessor,
        'mrpc': reader.MrpcProcessor,
        'mnli': reader.MnliProcessor,
    }

    processor = processors[task_name](data_dir=args.data_dir,
            vocab_path=args.vocab_path,
            max_seq_len=args.max_seq_len,
            do_lower_case=args.do_lower_case,
            in_tokens=False)

    test_data_generator = processor.data_generator(
                                batch_size=args.batch_size,
                                phase='dev',
                                epoch=1,
                                shuffle=False)

    num_labels = len(processor.get_labels())

    with fluid.dygraph.guard(place):
        if cls_model is None:
            cls_model = ClsModelLayer(
                args,
                bert_config,
                num_labels,
                is_training=False,
                return_pooled_out=True)

            #restore the model
            save_path = os.path.join(args.checkpoints, "final")
            print("Load params from %s" % save_path)
            model_dict,_ = fluid.load_dygraph(save_path)
            cls_model.load_dict(model_dict)

        print('Do predicting ...... ')
        cls_model.eval()

        total_cost, total_acc, total_num_seqs = [], [], []

        for batch in test_data_generator():
            data_ids = create_data(batch)
            np_loss, np_acc, np_num_seqs = cls_model(data_ids)

            np_loss = np_loss.numpy()
            np_acc = np_acc.numpy()
            np_num_seqs = np_num_seqs.numpy()

            total_cost.extend(np_loss * np_num_seqs)
            total_acc.extend(np_acc * np_num_seqs)
            total_num_seqs.extend(np_num_seqs)

        print("[evaluation] average acc: %f" % (np.sum(total_acc) / np.sum(total_num_seqs)))
Example #26
def test(args):
    # parse config
    config = parse_config(args.config)
    test_config = merge_configs(config, 'test', vars(args))
    print_configs(test_config, 'Test')
    place = fluid.CUDAPlace(0)

    with fluid.dygraph.guard(place):
        video_model = AttentionCluster("AttentionCluster",
                                       test_config,
                                       mode="test")

        model_dict, _ = fluid.load_dygraph(args.weights)
        video_model.set_dict(model_dict)

        test_reader = FeatureReader(name="ATTENTIONCLUSTER",
                                    mode='test',
                                    cfg=test_config)
        test_reader = test_reader.create_reader()

        video_model.eval()
        total_loss = 0.0
        total_acc1 = 0.0
        total_sample = 0

        for batch_id, data in enumerate(test_reader()):
            rgb = np.array([item[0] for item in data
                            ]).reshape([-1, 100, 1024]).astype('float32')
            audio = np.array([item[1] for item in data
                              ]).reshape([-1, 100, 128]).astype('float32')
            y_data = np.array([item[2] for item in data]).astype('float32')
            rgb = to_variable(rgb)
            audio = to_variable(audio)
            labels = to_variable(y_data)
            labels.stop_gradient = True
            output, logit = video_model([rgb, audio])

            loss = fluid.layers.sigmoid_cross_entropy_with_logits(x=logit,
                                                                  label=labels)
            loss = fluid.layers.reduce_sum(loss, dim=-1)
            avg_loss = fluid.layers.mean(loss)
            # get metrics
            valid_metrics = get_metrics(args.model_name.upper(), 'valid',
                                        test_config)
            hit_at_one, perr, gap = valid_metrics.calculate_and_log_out(
                loss,
                logit,
                labels,
                info='[TEST] test_iter {} '.format(batch_id))

            total_loss += avg_loss.numpy()[0]
            total_acc1 += hit_at_one
            total_sample += 1

            print('TEST iter {}, loss = {}, acc1 {}'.format(
                batch_id,
                avg_loss.numpy()[0], hit_at_one))

        print('Finish loss {} , acc1 {}'.format(total_loss / total_sample,
                                                total_acc1 / total_sample))
Example #27
    def initialize(self):
        with fluid.dygraph.guard():
            if os.path.isabs(self.net_path):
                net_path_full = self.net_path
            else:
                net_path_full = os.path.join(env_settings().network_path,
                                             self.net_path)

            self.net = siamfc_alexnet(
                backbone_pretrained=False,
                backbone_is_test=True,
                estimator_is_test=True)

            state_dictsm, _ = fluid.load_dygraph(net_path_full)
            self.net.load_dict(state_dictsm)
            self.net.train()

            self.target_estimator = self.net.target_estimator

        self.layer_stride = {'conv5': 8}
        self.layer_dim = {'conv5': 256}

        self.estimator_feature_layers = self.net.target_estimator_layer

        if isinstance(self.pool_stride, int) and self.pool_stride == 1:
            self.pool_stride = [1] * len(self.output_layers)

        self.feature_layers = sorted(
            list(set(self.output_layers + self.estimator_feature_layers)))

        self.mean = np.reshape([0., 0., 0.], [1, -1, 1, 1])
        self.std = np.reshape([1 / 255., 1 / 255., 1 / 255.], [1, -1, 1, 1])
Example #28
    def load_paddle_weights(self, weights_path):
        print('paddle weights loading...')
        import paddle.fluid as fluid
        with fluid.dygraph.guard():
            para_state_dict, opti_state_dict = fluid.load_dygraph(weights_path)

        for k, v in self.net.state_dict().items():
            keyword = 'stages.'
            if keyword in k:
                # replace: stages. -> stage
                name = k.replace(keyword, 'stage')
            else:
                name = k

            if name.endswith('num_batches_tracked'):
                continue

            if name.endswith('running_mean'):
                ppname = name.replace('running_mean', '_mean')
            elif name.endswith('running_var'):
                ppname = name.replace('running_var', '_variance')
            elif name.endswith('bias') or name.endswith('weight'):
                ppname = name
            else:
                print('Redundance:')
                print(name)
                raise ValueError

            self.net.state_dict()[k].copy_(
                torch.Tensor(para_state_dict[ppname]))
        print('model is loaded: {}'.format(weights_path))
Example #29
    def __init__(self):
        super().__init__()
        self.network = Network()
        model_path = '/home/aistudio/vid2vid/model/liteflownet/network-default.pdparams'
        state_dict, _ = F.load_dygraph(model_path)
        self.network.load_dict(state_dict)
        self.network.eval()
        print("load pretrained liteflownet from " + model_path)
Example #30
    def load(self, dir, step):
        print(f'Load {dir} for the step {step}')
        names = ['genA2B', 'genB2A', 'disGA', 'disGB', 'disLA', 'disLB']
        for name in names:
            params = fluid.load_dygraph(
                os.path.join(dir, self.dataset + '_%s_params_%07d' %
                             (name, step)))[0]
            getattr(self, name).load_dict(params, use_structured_name=True)