Example #1
    def test_summary_nlp(self):
        def _get_param_from_state_dict(state_dict):
            params = 0
            for k, v in state_dict.items():
                params += np.prod(v.numpy().shape)
            return params

        nlp_net = paddle.nn.GRU(input_size=2,
                                hidden_size=3,
                                num_layers=3,
                                direction="bidirectional")
        paddle.summary(nlp_net, (1, 1, 2))

        rnn = paddle.nn.LSTM(16, 32, 2)
        params_info = paddle.summary(
            rnn, [(-1, 23, 16), ((2, None, 32), (2, -1, 32))])
        gt_params = _get_param_from_state_dict(rnn.state_dict())
        np.testing.assert_allclose(params_info['total_params'], gt_params / 2.0)

        rnn = paddle.nn.GRU(16, 32, 2, direction='bidirectional')
        params_info = paddle.summary(rnn, (4, 23, 16))
        gt_params = _get_param_from_state_dict(rnn.state_dict())
        np.testing.assert_allclose(params_info['total_params'], gt_params / 2.0)

        rnn = paddle.nn.SimpleRNN(16, 32, 2, direction='bidirectional')
        params_info = paddle.summary(rnn, (4, 23, 16))
        gt_params = _get_param_from_state_dict(rnn.state_dict())
        np.testing.assert_allclose(params_info['total_params'], gt_params / 2.0)
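The assertions above work because paddle.summary both prints the layer table and returns a dict of aggregate parameter counts. A minimal standalone sketch (the toy Linear layer here is an illustration, not part of the original test):

import paddle

net = paddle.nn.Linear(16, 4)        # 16*4 weights + 4 biases = 68 parameters
info = paddle.summary(net, (1, 16))  # prints the table and returns the counts
print(info)                          # {'total_params': 68, 'trainable_params': 68}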
Example #2
def quantize_model(config, model):
    if config.get("Slim", False) and config["Slim"].get("quant", False):
        from paddleslim.dygraph.quant import QAT
        assert config["Slim"]["quant"]["name"].lower(
        ) == 'pact', 'Only PACT quantization method is supported now'
        QUANT_CONFIG["activation_preprocess_type"] = "PACT"
        model.quanter = QAT(config=QUANT_CONFIG)
        model.quanter.quantize(model)
        logger.info("QAT model summary:")
        paddle.summary(model, (1, 3, 224, 224))
    else:
        model.quanter = None
    return
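quantize_model relies on a module-level QUANT_CONFIG dict and a logger that are outside this snippet. A hypothetical QUANT_CONFIG, sketched from the PaddleSlim QAT config shown in Example #12, might look like this:

# Hypothetical module-level config assumed by quantize_model above;
# the keys mirror the paddleslim QAT quant_config in Example #12.
QUANT_CONFIG = {
    'weight_quantize_type': 'channel_wise_abs_max',
    'activation_quantize_type': 'moving_average_abs_max',
    'weight_bits': 8,
    'activation_bits': 8,
    'dtype': 'int8',
    'window_size': 10000,
    'moving_rate': 0.9,
    'quantizable_layer_type': ['Conv2D', 'Linear'],
}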
Example #3
def main():
    #x = paddle.tensor.rand((2, 3, 520, 520),dtype='float32')
    #print(x.shape)
    x = (2, 3, 520, 520)
    model = GIoNet(num_classes=59, IMG_SIZE=x[2::])
    params_info = paddle.summary(model, x)
    print(params_info)
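The commented-out lines show that a concrete tensor was considered before settling on a shape tuple. paddle.summary accepts either form: a shape via the positional input_size argument, or a real tensor via the input keyword (as in Example #8). A minimal sketch with a stand-in Linear model:

import paddle

net = paddle.nn.Linear(520, 10)
paddle.summary(net, (2, 520))                     # shape tuple
paddle.summary(net, input=paddle.rand([2, 520]))  # concrete tensor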
Example #4
def main():
    args = parse_args()
    cfg, model_name = _trim(get_config(args.config, show=False))
    print(f"Building model({model_name})...")
    model = build_model(cfg)

    params_info = paddle.summary(model, (1, 8, 3, 224, 224))

    print(params_info)
Example #5
def train():
    paddle.set_device('gpu')

    model = resnet50()
    paddle.summary(model, (1, 3, 32, 32))

    transform = Compose([
        paddle.vision.transforms.Transpose(),
        paddle.vision.transforms.Normalize(0, 255.),
    ])
    cifar10 = Cifar10(mode='train', transform=transform)

    loader = paddle.io.DataLoader(cifar10,
                                  shuffle=True,
                                  batch_size=BATCH_SIZE,
                                  num_workers=10)
    for epoch in range(EPOCH_NUM):
        for batch_id, data in enumerate(loader()):
            out = model(data[0])
            out = paddle.mean(out)
            if batch_id % 10 == 0:
                print("Epoch {}: batch {}, out {}".format(
                    epoch, batch_id, out.numpy()))
Example #6
def main():
    args = parse_args()
    cfg, model_name = _trim(get_config(args.config, show=False), args)
    print(f"Building model({model_name})...")
    model = build_model(cfg)

    img_size = args.img_size
    num_seg = args.num_seg
    #NOTE: only support tsm now, will refine soon
    params_info = paddle.summary(model, (1, num_seg, 3, img_size, img_size))
    print(params_info)

    if args.FLOPs:
        flops_info = paddle.flops(model, [1, num_seg, 3, img_size, img_size],
                                  print_detail=True)
        print(flops_info)
Example #7
    def test_summary_error(self):
        with self.assertRaises(TypeError):
            nlp_net = paddle.nn.GRU(input_size=2, hidden_size=3, num_layers=3)
            paddle.summary(nlp_net, (1, 1, '2'))

        with self.assertRaises(ValueError):
            nlp_net = paddle.nn.GRU(input_size=2, hidden_size=3, num_layers=3)
            paddle.summary(nlp_net, (-1, -1))

        paddle.disable_static()
        nlp_net = paddle.nn.GRU(input_size=2, hidden_size=3, num_layers=3)
        paddle.summary(nlp_net, (1, 1, 2))
Example #8
    def test_summary_input(self):
        paddle.enable_static()
        mymodel = MyModel()
        input_data = paddle.rand([1, 20])
        paddle.summary(mymodel, input=input_data)
        paddle.disable_static()

        rnn = paddle.nn.SimpleRNN(16, 32, 2, direction='bidirectional')
        input_data = paddle.rand([4, 23, 16])
        paddle.summary(rnn, input=input_data)

        lenet_List_input = LeNetListInput()
        input_data = [paddle.rand([1, 1, 28, 28]), paddle.rand([1, 400])]
        paddle.summary(lenet_List_input, input=input_data)

        lenet_dict_input = LeNetDictInput()
        input_data = {
            'x1': paddle.rand([1, 1, 28, 28]),
            'x2': paddle.rand([1, 400])
        }
        paddle.summary(lenet_dict_input, input=input_data)
Example #9
            nn.BatchNorm(128),
            nn.ReLU(),
            nn.Conv1D(128, max_points, 1),
            nn.BatchNorm(max_points),
            nn.ReLU(),
        )
        self.fc = nn.Sequential(nn.Linear(1024, 512), nn.ReLU(),
                                nn.Linear(512, 256), nn.ReLU(),
                                nn.Dropout(p=0.7),
                                nn.Linear(256, num_classes))

    def forward(self, inputs):
        """
            Input:
                inputs: input points data, [B, 3, N]
            Return:
                x: predicts, [B, num_classes]
        """
        x = paddle.to_tensor(inputs)
        x = self.mlp_1(x)
        x = self.mlp_2(x)
        x = paddle.max(x, axis=2)
        x = self.fc(x)

        return x


if __name__ == '__main__':
    model = PointNet_Basic_Clas()
    paddle.summary(model, (64, 3, 1024))
Example #10
# Loss weighting ratios
radio_cls_loss = 1.0
radio_bbox_loss = 0.5
radio_landmark_loss = 1.0

# Training hyperparameters
data_path = '../dataset/48/all_data'
batch_size = 384
learning_rate = 1e-3
epoch_num = 22
model_path = '../infer_models'

# Build the ONet model
model = ONet()
paddle.summary(model, input_size=(batch_size, 3, 48, 48))

# Load the data
train_dataset = CustomDataset(data_path)
train_loader = DataLoader(dataset=train_dataset,
                          batch_size=batch_size,
                          shuffle=True)

# Configure the optimizer
scheduler = paddle.optimizer.lr.PiecewiseDecay(
    boundaries=[6, 14, 20],
    values=[0.001, 0.0001, 0.00001, 0.000001],
    verbose=True)
optimizer = paddle.optimizer.Adam(
    parameters=model.parameters(),
    learning_rate=scheduler)
Example #11
def train(args):
    if dist.get_rank() == 0:
        # Logger
        writer = LogWriter(logdir='log')
    # Enable multi-GPU training
    dist.init_parallel_env()
    # Load the training data
    train_dataset = PPASRDataset(args.train_manifest,
                                 args.dataset_vocab,
                                 mean=args.data_mean,
                                 std=args.data_std,
                                 min_duration=args.min_duration,
                                 max_duration=args.max_duration)
    train_loader = DataLoader(dataset=train_dataset,
                              batch_size=args.batch_size,
                              collate_fn=collate_fn,
                              num_workers=args.num_workers,
                              use_shared_memory=False)
    train_loader_shuffle = DataLoader(dataset=train_dataset,
                                      batch_size=args.batch_size,
                                      collate_fn=collate_fn,
                                      num_workers=args.num_workers,
                                      shuffle=True,
                                      use_shared_memory=False)
    # Load the test data
    test_dataset = PPASRDataset(args.test_manifest,
                                args.dataset_vocab,
                                mean=args.data_mean,
                                std=args.data_std)
    test_loader = DataLoader(dataset=test_dataset,
                             batch_size=args.batch_size,
                             collate_fn=collate_fn,
                             num_workers=args.num_workers,
                             use_shared_memory=False)
    # Build the decoder used for evaluation
    greedy_decoder = GreedyDecoder(train_dataset.vocabulary)
    # Build the model, storing the data mean and std in it for later inference
    model = PPASR(train_dataset.vocabulary,
                  data_mean=paddle.to_tensor(args.data_mean),
                  data_std=paddle.to_tensor(args.data_std))
    if dist.get_rank() == 0:
        print('The third dimension of input_size is variable-length; a fixed value is used here so that the output sizes can be inspected!')
        paddle.summary(model, input_size=(args.batch_size, 128, 500))
    # Enable multi-GPU training
    model = paddle.DataParallel(model)
    # Configure the optimizer
    clip = paddle.nn.ClipGradByNorm(clip_norm=1.0)
    # Piecewise learning rate
    boundaries = [10, 20, 50, 100]
    lr = [0.1**l * args.learning_rate for l in range(len(boundaries) + 1)]
    # Get the number of already-trained epochs from the pretrained model path
    last_epoch = int(re.findall(r'\d+', args.pretrained_model)
                     [-1]) if args.pretrained_model is not None else -1
    scheduler = paddle.optimizer.lr.PiecewiseDecay(boundaries=boundaries,
                                                   values=lr,
                                                   last_epoch=last_epoch,
                                                   verbose=True)
    optimizer = paddle.optimizer.Adam(parameters=model.parameters(),
                                      learning_rate=scheduler,
                                      grad_clip=clip)
    # Loss function
    ctc_loss = paddle.nn.CTCLoss()
    # Load the pretrained model
    if args.pretrained_model is not None:
        model.set_state_dict(
            paddle.load(os.path.join(args.pretrained_model, 'model.pdparams')))
        optimizer.set_state_dict(
            paddle.load(os.path.join(args.pretrained_model,
                                     'optimizer.pdopt')))
    train_step = 0
    test_step = 0
    # Start training
    for epoch in range(last_epoch, args.num_epoch):
        # Do not shuffle the data in the first epoch
        if epoch == 1:
            train_loader = train_loader_shuffle
        for batch_id, (inputs, labels, input_lens,
                       label_lens) in enumerate(train_loader()):
            out, out_lens = model(inputs, input_lens)
            out = paddle.transpose(out, perm=[2, 0, 1])
            # Compute the loss
            loss = ctc_loss(out, labels, out_lens, label_lens)
            loss.backward()
            optimizer.step()
            optimizer.clear_grad()
            # Only one process prints when training on multiple GPUs
            if batch_id % 100 == 0 and dist.get_rank() == 0:
                print('[%s] Train epoch %d, batch %d, loss: %f' %
                      (datetime.now(), epoch, batch_id, loss))
                writer.add_scalar('Train loss', loss, train_step)
                train_step += 1
            # Also save the model every fixed number of steps
            if batch_id % 2000 == 0 and batch_id != 0 and dist.get_rank() == 0:
                # Save the model
                save_model(args=args,
                           epoch=epoch,
                           model=model,
                           optimizer=optimizer)
        # Only one process runs evaluation and saves the model when training on multiple GPUs
        if dist.get_rank() == 0:
            # Run evaluation
            model.eval()
            cer = evaluate(model, test_loader, greedy_decoder)
            print('[%s] Test epoch %d, cer: %f' % (datetime.now(), epoch, cer))
            writer.add_scalar('Test cer', cer, test_step)
            test_step += 1
            model.train()
            # Log the learning rate
            writer.add_scalar('Learning rate', scheduler.last_lr, epoch)
            # Save the model
            save_model(args=args,
                       epoch=epoch,
                       model=model,
                       optimizer=optimizer)
        scheduler.step()
Example #12
def compress(args):
    if args.data == "cifar10":
        transform = T.Compose([T.Transpose(), T.Normalize([127.5], [127.5])])
        train_dataset = paddle.vision.datasets.Cifar10(mode="train",
                                                       backend="cv2",
                                                       transform=transform)
        val_dataset = paddle.vision.datasets.Cifar10(mode="test",
                                                     backend="cv2",
                                                     transform=transform)
        class_dim = 10
        image_shape = [3, 32, 32]
        pretrain = False
        args.total_images = 50000
    elif args.data == "imagenet":
        import imagenet_reader as reader
        train_dataset = reader.ImageNetDataset(mode='train')
        val_dataset = reader.ImageNetDataset(mode='val')
        class_dim = 1000
        image_shape = "3,224,224"
    else:
        raise ValueError("{} is not supported.".format(args.data))

    trainer_num = paddle.distributed.get_world_size()
    use_data_parallel = trainer_num != 1

    place = paddle.set_device('gpu' if args.use_gpu else 'cpu')
    # model definition
    if use_data_parallel:
        paddle.distributed.init_parallel_env()

    pretrain = True if args.data == "imagenet" else False
    if args.model == "mobilenet_v1":
        net = mobilenet_v1(pretrained=pretrain, num_classes=class_dim)
    elif args.model == "mobilenet_v3":
        net = MobileNetV3_large_x1_0(class_dim=class_dim)
        if pretrain:
            load_dygraph_pretrain(net, args.pretrained_model, True)
    else:
        raise ValueError("{} is not supported.".format(args.model))
    _logger.info("Origin model summary:")
    paddle.summary(net, (1, 3, 224, 224))

    ############################################################################################################
    # 1. quantization configs
    ############################################################################################################
    quant_config = {
        # weight preprocess type, default is None and no preprocessing is performed.
        'weight_preprocess_type': None,
        # activation preprocess type, default is None and no preprocessing is performed.
        'activation_preprocess_type': None,
        # weight quantize type, default is 'channel_wise_abs_max'
        'weight_quantize_type': 'channel_wise_abs_max',
        # activation quantize type, default is 'moving_average_abs_max'
        'activation_quantize_type': 'moving_average_abs_max',
        # weight quantize bit num, default is 8
        'weight_bits': 8,
        # activation quantize bit num, default is 8
        'activation_bits': 8,
        # data type after quantization, such as 'uint8', 'int8', etc. default is 'int8'
        'dtype': 'int8',
        # window size for 'range_abs_max' quantization. default is 10000
        'window_size': 10000,
        # The decay coefficient of moving average, default is 0.9
        'moving_rate': 0.9,
        # for dygraph quantization, layers of type in quantizable_layer_type will be quantized
        'quantizable_layer_type': ['Conv2D', 'Linear'],
    }

    if args.use_pact:
        quant_config['activation_preprocess_type'] = 'PACT'

    ############################################################################################################
    # 2. Quantize the model with QAT (quant aware training)
    ############################################################################################################

    quanter = QAT(config=quant_config)
    quanter.quantize(net)

    _logger.info("QAT model summary:")
    paddle.summary(net, (1, 3, 224, 224))

    opt, lr = create_optimizer(net, trainer_num, args)

    if use_data_parallel:
        net = paddle.DataParallel(net)

    train_batch_sampler = paddle.io.DistributedBatchSampler(
        train_dataset,
        batch_size=args.batch_size,
        shuffle=True,
        drop_last=True)
    train_loader = paddle.io.DataLoader(train_dataset,
                                        batch_sampler=train_batch_sampler,
                                        places=place,
                                        return_list=True,
                                        num_workers=4)

    valid_loader = paddle.io.DataLoader(val_dataset,
                                        places=place,
                                        batch_size=args.batch_size,
                                        shuffle=False,
                                        drop_last=False,
                                        return_list=True,
                                        num_workers=4)

    @paddle.no_grad()
    def test(epoch, net):
        net.eval()
        batch_id = 0
        acc_top1_ns = []
        acc_top5_ns = []

        eval_reader_cost = 0.0
        eval_run_cost = 0.0
        total_samples = 0
        reader_start = time.time()
        for data in valid_loader():
            eval_reader_cost += time.time() - reader_start
            image = data[0]
            label = data[1]
            if args.data == "cifar10":
                label = paddle.reshape(label, [-1, 1])

            eval_start = time.time()

            out = net(image)
            acc_top1 = paddle.metric.accuracy(input=out, label=label, k=1)
            acc_top5 = paddle.metric.accuracy(input=out, label=label, k=5)

            eval_run_cost += time.time() - eval_start
            batch_size = image.shape[0]
            total_samples += batch_size

            if batch_id % args.log_period == 0:
                log_period = 1 if batch_id == 0 else args.log_period
                _logger.info(
                    "Eval epoch[{}] batch[{}] - top1: {:.6f}; top5: {:.6f}; avg_reader_cost: {:.6f} s, avg_batch_cost: {:.6f} s, avg_samples: {}, avg_ips: {:.3f} images/s"
                    .format(epoch, batch_id, np.mean(acc_top1.numpy()),
                            np.mean(acc_top5.numpy()),
                            eval_reader_cost / log_period,
                            (eval_reader_cost + eval_run_cost) / log_period,
                            total_samples / log_period, total_samples /
                            (eval_reader_cost + eval_run_cost)))
                eval_reader_cost = 0.0
                eval_run_cost = 0.0
                total_samples = 0
            acc_top1_ns.append(np.mean(acc_top1.numpy()))
            acc_top5_ns.append(np.mean(acc_top5.numpy()))
            batch_id += 1
            reader_start = time.time()

        _logger.info(
            "Final eval epoch[{}] - acc_top1: {:.6f}; acc_top5: {:.6f}".format(
                epoch, np.mean(np.array(acc_top1_ns)),
                np.mean(np.array(acc_top5_ns))))
        return np.mean(np.array(acc_top1_ns))

    def cross_entropy(input, target, ls_epsilon):
        if ls_epsilon > 0:
            if target.shape[-1] != class_dim:
                target = paddle.nn.functional.one_hot(target, class_dim)
            target = paddle.nn.functional.label_smooth(target,
                                                       epsilon=ls_epsilon)
            target = paddle.reshape(target, shape=[-1, class_dim])
            input = -paddle.nn.functional.log_softmax(input, axis=-1)
            cost = paddle.sum(target * input, axis=-1)
        else:
            cost = paddle.nn.functional.cross_entropy(input=input,
                                                      label=target)
        avg_cost = paddle.mean(cost)
        return avg_cost

    def train(epoch, net):

        net.train()
        batch_id = 0

        train_reader_cost = 0.0
        train_run_cost = 0.0
        total_samples = 0
        reader_start = time.time()
        for data in train_loader():
            train_reader_cost += time.time() - reader_start

            image = data[0]
            label = data[1]
            if args.data == "cifar10":
                label = paddle.reshape(label, [-1, 1])

            train_start = time.time()
            out = net(image)
            avg_cost = cross_entropy(out, label, args.ls_epsilon)

            acc_top1 = paddle.metric.accuracy(input=out, label=label, k=1)
            acc_top5 = paddle.metric.accuracy(input=out, label=label, k=5)
            avg_cost.backward()
            opt.step()
            opt.clear_grad()
            lr.step()

            loss_n = np.mean(avg_cost.numpy())
            acc_top1_n = np.mean(acc_top1.numpy())
            acc_top5_n = np.mean(acc_top5.numpy())

            train_run_cost += time.time() - train_start
            batch_size = image.shape[0]
            total_samples += batch_size

            if batch_id % args.log_period == 0:
                log_period = 1 if batch_id == 0 else args.log_period
                _logger.info(
                    "epoch[{}]-batch[{}] lr: {:.6f} - loss: {:.6f}; top1: {:.6f}; top5: {:.6f}; avg_reader_cost: {:.6f} s, avg_batch_cost: {:.6f} s, avg_samples: {}, avg_ips: {:.3f} images/s"
                    .format(
                        epoch, batch_id, lr.get_lr(), loss_n, acc_top1_n,
                        acc_top5_n, train_reader_cost / log_period,
                        (train_reader_cost + train_run_cost) / log_period,
                        total_samples / log_period,
                        total_samples / (train_reader_cost + train_run_cost)))
                train_reader_cost = 0.0
                train_run_cost = 0.0
                total_samples = 0
            batch_id += 1
            reader_start = time.time()

    ############################################################################################################
    # train loop
    ############################################################################################################
    best_acc1 = 0.0
    best_epoch = 0
    for i in range(args.num_epochs):
        train(i, net)
        acc1 = test(i, net)
        if paddle.distributed.get_rank() == 0:
            model_prefix = os.path.join(args.model_save_dir, "epoch_" + str(i))
            paddle.save(net.state_dict(), model_prefix + ".pdparams")
            paddle.save(opt.state_dict(), model_prefix + ".pdopt")

        if acc1 > best_acc1:
            best_acc1 = acc1
            best_epoch = i
            if paddle.distributed.get_rank() == 0:
                model_prefix = os.path.join(args.model_save_dir, "best_model")
                paddle.save(net.state_dict(), model_prefix + ".pdparams")
                paddle.save(opt.state_dict(), model_prefix + ".pdopt")

    ############################################################################################################
    # 3. Save quant aware model
    ############################################################################################################
    if paddle.distributed.get_rank() == 0:
        # load best model
        load_dygraph_pretrain(net,
                              os.path.join(args.model_save_dir, "best_model"))

        path = os.path.join(args.model_save_dir, "inference_model",
                            'qat_model')
        quanter.save_quantized_model(net,
                                     path,
                                     input_spec=[
                                         paddle.static.InputSpec(
                                             shape=[None, 3, 224, 224],
                                             dtype='float32')
                                     ])
Example #13
                     box_loss, landmarks_loss, acc))
        scheduler.step()

        # Save the model
        if not os.path.exists(model_path):
            os.makedirs(model_path)
        paddle.jit.save(layer=model,
                        path=os.path.join(model_path, name),
                        input_spec=[InputSpec(shape=shape, dtype='float32')])


# Build the ONet model
modelO = ONet()
shapeO = [None, 3, 48, 48]
pathO = '../dataset/28/all_data'
paddle.summary(modelO, input_size=(batch_size, 3, 48, 48))
train(modelO, 0.5, 22, pathO, 'ONet', shapeO)

# Build the PNet model
modelP = PNet()
shapeP = [None, 3, None, None]
pathP = '../dataset/12/all_data'
paddle.summary(modelP, input_size=(batch_size, 3, 12, 12))
train(modelP, 0.5, 30, pathP, 'PNet', shapeP)

# Build the RNet model
modelR = RNet()
shapeR = [None, 3, 24, 24]
pathR = '../dataset/32/all_data'
paddle.summary(modelR, input_size=(batch_size, 3, 24, 24))
train(modelR, 0.5, 22, pathR, 'RNet', shapeR)
Example #14
    def test_summary_non_tensor(self):
        paddle.summary(ModelOutter(), input_size=(-1, 3))
Example #15
def train(args):
    if dist.get_rank() == 0:
        shutil.rmtree('log', ignore_errors=True)
        # Logger
        writer = LogWriter(logdir='log')

    # Enable multi-GPU training
    if len(args.gpus.split(',')) > 1:
        dist.init_parallel_env()

    # Load the training data
    train_dataset = PPASRDataset(args.train_manifest,
                                 args.dataset_vocab,
                                 mean_std_filepath=args.mean_std_path,
                                 min_duration=args.min_duration,
                                 max_duration=args.max_duration)
    batch_sampler = paddle.io.DistributedBatchSampler(
        train_dataset, batch_size=args.batch_size, shuffle=True)
    train_loader = DataLoader(dataset=train_dataset,
                              collate_fn=collate_fn,
                              batch_sampler=batch_sampler,
                              num_workers=args.num_workers)
    # Load the test data
    test_dataset = PPASRDataset(args.test_manifest,
                                args.dataset_vocab,
                                mean_std_filepath=args.mean_std_path)
    batch_sampler = paddle.io.BatchSampler(test_dataset,
                                           batch_size=args.batch_size)
    test_loader = DataLoader(dataset=test_dataset,
                             collate_fn=collate_fn,
                             batch_sampler=batch_sampler,
                             num_workers=args.num_workers)

    # Build the model
    model = DeepSpeech2Model(feat_size=train_dataset.feature_dim,
                             dict_size=len(train_dataset.vocabulary),
                             num_conv_layers=args.num_conv_layers,
                             num_rnn_layers=args.num_rnn_layers,
                             rnn_size=args.rnn_layer_size)
    if dist.get_rank() == 0:
        print('The third dimension of input_size is variable-length; a fixed value is used here so that the output sizes can be inspected!')
        paddle.summary(model,
                       input_size=[(None, train_dataset.feature_dim, 970),
                                   (None, )],
                       dtypes=[paddle.float32, paddle.int64])

    # Enable multi-GPU training
    if len(args.gpus.split(',')) > 1:
        model = paddle.DataParallel(model)

    # Configure the optimizer
    clip = paddle.nn.ClipGradByNorm(clip_norm=3.0)
    # Get the number of already-trained epochs from the checkpoint path
    last_epoch = int(re.findall(
        r'\d+', args.resume)[-1]) if args.resume is not None else 0
    scheduler = paddle.optimizer.lr.ExponentialDecay(
        learning_rate=args.learning_rate,
        gamma=0.83,
        last_epoch=last_epoch,
        verbose=True)
    optimizer = paddle.optimizer.Adam(
        parameters=model.parameters(),
        learning_rate=scheduler,
        weight_decay=paddle.regularizer.L2Decay(1e-06),
        grad_clip=clip)

    # Loss function
    ctc_loss = paddle.nn.CTCLoss()

    # Load the pretrained model
    if args.pretrained_model is not None:
        model_dict = model.state_dict()
        model_state_dict = paddle.load(
            os.path.join(args.pretrained_model, 'model.pdparams'))
        # Feature layers
        for name, weight in model_dict.items():
            if name in model_state_dict.keys():
                if weight.shape != list(model_state_dict[name].shape):
                    print('{} not used, shape {} unmatched with {} in model.'.
                          format(name, list(model_state_dict[name].shape),
                                 weight.shape))
                    model_state_dict.pop(name, None)
            else:
                print('Lack weight: {}'.format(name))
        model.set_dict(model_state_dict)
        print('Pretrained model loaded successfully')

    # Resume training from a checkpoint
    if args.resume is not None:
        model.set_state_dict(
            paddle.load(os.path.join(args.resume, 'model.pdparams')))
        optimizer.set_state_dict(
            paddle.load(os.path.join(args.resume, 'optimizer.pdopt')))
        print('Model and optimizer state restored successfully')

    train_step = 0
    test_step = 0
    # Start training
    for epoch in range(last_epoch, args.num_epoch):
        for batch_id, (inputs, labels, input_lens,
                       label_lens) in enumerate(train_loader()):

            out, out_lens = model(inputs, input_lens)
            out = paddle.transpose(out, perm=[1, 0, 2])

            # Compute the loss
            loss = ctc_loss(out, labels, out_lens, label_lens)
            loss.backward()
            optimizer.step()
            optimizer.clear_grad()

            # Only one process prints when training on multiple GPUs
            if batch_id % 100 == 0 and dist.get_rank() == 0:
                print('[%s] Train epoch %d, batch %d, loss: %f' %
                      (datetime.now(), epoch, batch_id, loss))
                writer.add_scalar('Train loss', loss, train_step)
                train_step += 1

            # Also save the model every fixed number of steps
            if batch_id % 2000 == 0 and batch_id != 0 and dist.get_rank() == 0:
                # Save the model
                save_model(args=args,
                           epoch=epoch,
                           model=model,
                           optimizer=optimizer)

        # Only one process runs evaluation and saves the model when training on multiple GPUs
        if dist.get_rank() == 0:
            # Run evaluation
            model.eval()
            c = evaluate(model, test_loader, test_dataset.vocabulary)
            print('\n', '=' * 70)
            print('[%s] Test epoch %d, cer: %f' % (datetime.now(), epoch, c))
            print('=' * 70)
            writer.add_scalar('Test cer', c, test_step)
            test_step += 1
            model.train()

            # Log the learning rate
            writer.add_scalar('Learning rate', scheduler.last_lr, epoch)

            # Save the model
            save_model(args=args,
                       epoch=epoch,
                       model=model,
                       optimizer=optimizer)
        scheduler.step()
Example #16
        con = self.gru_con(con)[0]
        # Fuse the GRU features
        own = self.sub_own(own)
        con = self.sub_con(con)
        # Flatten to reduce dimensionality
        own = paddle.tensor.flatten(own, start_axis=1)
        con = paddle.tensor.flatten(con, start_axis=1)
        own = self.features_own(own)
        con = self.features_con(con)
        # Concatenate the pro-side and con-side features
        features = paddle.tensor.concat([own, con], axis=1)
        role_features = self.sub_role(features)
        flag_features = self.sub_flag(features)
        money_features = self.sub_money(features)
        # Output heads
        role = self.role(role_features)
        flag = self.flag(flag_features)
        money = self.money(money_features)

        if not self.is_infer:
            return role, flag, money
        else:
            role_prob = paddle.nn.functional.softmax(role)
            role_sort = paddle.tensor.argsort(role_prob, descending=True)
            return role_prob, role_sort, flag, money


if __name__ == '__main__':
    net = CWNet()
    paddle.summary(net, input_size=[(8, 4), (8, 5)], dtypes="int64")
Example #17
                result = 0
                for grasp_pre in grasps_pre:
                    if max_iou(grasp_pre, grasps_true) > 0.25:
                        result = 1
                        break

            if result:
                val_result['correct'] += 1
            else:
                val_result['failed'] += 1
    acc = val_result['correct'] / (val_result['correct'] +
                                   val_result['failed'])
    val_result['acc'] = acc
    print(time.ctime())
    print('Correct: {}/{}, val_loss: {:0.4f}, acc: {:0.4f}'.format(
        val_result['correct'], val_result['correct'] + val_result['failed'],
        val_result['loss'].numpy()[0], acc))
    return val_result


if __name__ == '__main__':
    use_gpu = True
    paddle.set_device('gpu:0') if use_gpu else paddle.set_device('cpu')
    net = GGCNN(include_depth + include_rgb * 3)
    paddle.summary(net, (1, 4, 300, 300))
    for epoch_num in range(epoch_nums):
        train_result = train(net, epoch_num, train_batches)
        print('validating...')
        val_result = eval_net(net, val_batches)
        fluid.dygraph.save_dygraph(net.state_dict(), "save_dir/params")
Example #18
import paddle
import paddle.nn as nn


class VoxNet(nn.Layer):
    def __init__(self, name_scope='VoxNet_', num_classes=10):
        super(VoxNet, self).__init__()
        self.backbone = nn.Sequential(nn.Conv3D(1, 32, 5, 2), nn.BatchNorm(32),
                                      nn.LeakyReLU(), nn.Conv3D(32, 32, 3, 1),
                                      nn.MaxPool3D(2, 2, 0))
        self.head = nn.Sequential(nn.Linear(32 * 6 * 6 * 6, 128),
                                  nn.LeakyReLU(), nn.Dropout(0.2),
                                  nn.Linear(128, num_classes))

    def forward(self, inputs):
        x = paddle.to_tensor(inputs)
        x = self.backbone(x)
        x = paddle.reshape(x, (-1, 32 * 6 * 6 * 6))
        x = self.head(x)

        return x


if __name__ == '__main__':
    model = VoxNet()
    paddle.summary(model, (64, 1, 32, 32, 32))
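As a sanity check on the 32 * 6 * 6 * 6 flatten size in the head (assuming the default padding of 0): Conv3D(1, 32, 5, 2) maps the 32^3 voxel grid to floor((32 - 5) / 2) + 1 = 14 per side, Conv3D(32, 32, 3, 1) gives 14 - 3 + 1 = 12, and MaxPool3D(2, 2, 0) halves that to 6, so the flattened feature vector has 32 * 6 * 6 * 6 = 6912 elements, matching nn.Linear(32 * 6 * 6 * 6, 128).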
Example #19
# Loss weighting ratios
radio_cls_loss = 1.0
radio_bbox_loss = 0.5
radio_landmark_loss = 0.5

# Training hyperparameters
data_path = '../dataset/24/all_data'
batch_size = 384
learning_rate = 1e-3
epoch_num = 22
model_path = '../infer_models'

# Build the RNet model
model = RNet()
paddle.summary(model, input_size=(batch_size, 3, 24, 24))

# Load the data
train_dataset = CustomDataset(data_path)
train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)

# Configure the optimizer
scheduler = paddle.optimizer.lr.PiecewiseDecay(boundaries=[6, 14, 20], values=[0.001, 0.0001, 0.00001, 0.000001],
                                               verbose=True)
optimizer = paddle.optimizer.Adam(parameters=model.parameters(),
                                  learning_rate=scheduler,
                                  weight_decay=paddle.regularizer.L2Decay(1e-4))

# Loss functions
class_loss = ClassLoss()
bbox_loss = BBoxLoss()
Example #20
    def test_summary_gpu(self):
        paddle.disable_static(self.device)
        rnn = paddle.nn.LSTM(16, 32, 2)
        params_info = paddle.summary(
            rnn, [(-1, 23, 16), ((2, None, 32), (2, -1, 32))])
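The nested shape list mirrors the signature LSTM.forward(inputs, (h0, c0)): the first entry describes the input sequence, the second the pair of initial states. A sketch with concrete tensors in place of the shape tuples:

import paddle

rnn = paddle.nn.LSTM(16, 32, 2)
x = paddle.rand([4, 23, 16])   # (batch, time, features)
h0 = paddle.zeros([2, 4, 32])  # (num_layers, batch, hidden)
c0 = paddle.zeros([2, 4, 32])
y, (hn, cn) = rnn(x, (h0, c0))
print(y.shape)                 # [4, 23, 32]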
Example #21
# Loss weighting ratios
radio_cls_loss = 1.0
radio_bbox_loss = 0.5
radio_landmark_loss = 0.5

# Training hyperparameters
data_path = '../dataset/12/all_data'
batch_size = 384
learning_rate = 1e-3
epoch_num = 30
model_path = '../infer_models'

# Build the PNet model
model = PNet()
paddle.summary(model, input_size=(batch_size, 3, 12, 12))

# Load the data
train_dataset = CustomDataset(data_path)
train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)

# Configure the optimizer
scheduler = paddle.optimizer.lr.PiecewiseDecay(boundaries=[6, 14, 20], values=[0.001, 0.0001, 0.00001, 0.000001],
                                               verbose=True)
optimizer = paddle.optimizer.Adam(parameters=model.parameters(),
                                  learning_rate=scheduler,
                                  weight_decay=paddle.regularizer.L2Decay(1e-4))

# Loss functions
class_loss = ClassLoss()
bbox_loss = BBoxLoss()
Example #22
def train(args):
    # Enable multi-GPU training
    if len(args.gpus.split(',')) > 1:
        dist.init_parallel_env()
    if dist.get_rank() == 0:
        shutil.rmtree('log', ignore_errors=True)
        # Logger
        writer = LogWriter(logdir='log')
    # Load the data
    train_dataset = CustomDataset(args.train_root_path, is_train=False)
    # Enable multi-GPU training
    if len(args.gpus.split(',')) > 1:
        batch_sampler = paddle.io.DistributedBatchSampler(train_dataset, batch_size=args.batch_size, shuffle=True)
    else:
        batch_sampler = paddle.io.BatchSampler(train_dataset, batch_size=args.batch_size, shuffle=True)
    train_loader = DataLoader(dataset=train_dataset, batch_sampler=batch_sampler, num_workers=args.num_workers)
    print("[%s] 总数据类别为:%d" % (datetime.now(), train_dataset.num_classes))

    # Build the model; the author thoughtfully also provides a ResNet variant to cover different use cases
    if args.use_model == 'resnet_face34':
        model = resnet_face34()
    else:
        model = MobileFaceNet()
    metric_fc = ArcNet(feature_dim=512, class_dim=train_dataset.num_classes)
    if dist.get_rank() == 0:
        paddle.summary(model, input_size=(None, 3, 112, 112))

    # Enable multi-GPU training
    if len(args.gpus.split(',')) > 1:
        model = paddle.DataParallel(model)
        metric_fc = paddle.DataParallel(metric_fc)

    # Get the number of already-trained epochs from the checkpoint path
    last_epoch = int(re.findall(r'\d+', args.resume)[-1]) + 1 if args.resume is not None else 0
    # Learning rate decay
    scheduler = paddle.optimizer.lr.StepDecay(learning_rate=args.learning_rate, step_size=10, gamma=0.1, last_epoch=last_epoch, verbose=True)
    # Configure the optimizer
    optimizer = paddle.optimizer.Momentum(parameters=model.parameters() + metric_fc.parameters(),
                                          learning_rate=scheduler,
                                          momentum=0.9,
                                          weight_decay=paddle.regularizer.L2Decay(5e-4))

    # Load the pretrained model
    if args.pretrained_model is not None:
        model_dict = model.state_dict()
        model_state_dict = paddle.load(os.path.join(args.pretrained_model, 'model.pdparams'))
        # Feature layers
        for name, weight in model_dict.items():
            if name in model_state_dict.keys():
                if weight.shape != list(model_state_dict[name].shape):
                    print('{} not used, shape {} unmatched with {} in model.'.
                          format(name, list(model_state_dict[name].shape), weight.shape))
                    model_state_dict.pop(name, None)
            else:
                print('Lack weight: {}'.format(name))
        model.set_dict(model_state_dict)
        print('[%s] Rank %d loaded the model parameters successfully' % (datetime.now(), dist.get_rank()))

    # Resume training
    if args.resume is not None:
        model.set_state_dict(paddle.load(os.path.join(args.resume, 'model.pdparams')))
        metric_fc.set_state_dict(paddle.load(os.path.join(args.resume, 'metric_fc.pdparams')))
        optimizer.set_state_dict(paddle.load(os.path.join(args.resume, 'optimizer.pdopt')))
        print('[%s] Rank %d restored the model and optimizer parameters successfully' % (datetime.now(), dist.get_rank()))

    # Loss function
    loss = paddle.nn.CrossEntropyLoss()
    train_step = 0
    test_step = 0
    sum_batch = len(train_loader) * (args.num_epoch - last_epoch)
    # Start training
    for epoch in range(last_epoch, args.num_epoch):
        loss_sum = []
        accuracies = []
        for batch_id, (img, label) in enumerate(train_loader()):
            start = time.time()
            feature = model(img)
            output = metric_fc(feature, label)
            # Compute the loss
            los = loss(output, label)
            los.backward()
            optimizer.step()
            optimizer.clear_grad()
            # Compute the accuracy
            label = paddle.reshape(label, shape=(-1, 1))
            acc = accuracy(input=paddle.nn.functional.softmax(output), label=label)
            accuracies.append(acc.numpy()[0])
            loss_sum.append(los.numpy()[0])
            # Only one process prints when training on multiple GPUs
            if batch_id % 100 == 0 and dist.get_rank() == 0:
                eta_sec = ((time.time() - start) * 1000) * (sum_batch - (epoch - last_epoch) * len(train_loader) - batch_id)
                eta_str = str(timedelta(seconds=int(eta_sec / 1000)))
                print('[%s] Train epoch %d, batch: %d/%d, loss: %f, accuracy: %f, eta: %s' % (
                    datetime.now(), epoch, batch_id, len(train_loader), sum(loss_sum) / len(loss_sum), sum(accuracies) / len(accuracies), eta_str))
                writer.add_scalar('Train loss', los, train_step)
                train_step += 1
                loss_sum = []
        # Only one process runs evaluation and saves the model when training on multiple GPUs
        if dist.get_rank() == 0:
            print('='*70)
            acc = test(model)
            print('[%s] Test %d, accuracy: %f' % (datetime.now(), epoch, acc))
            print('='*70)
            writer.add_scalar('Test acc', acc, test_step)
            # Log the learning rate
            writer.add_scalar('Learning rate', scheduler.last_lr, epoch)
            test_step += 1
            save_model(args, epoch, model, metric_fc, optimizer)
        scheduler.step()
    save_model(args, args.num_epoch, model, metric_fc, optimizer)
Example #23
    def test_summary_nlp(self):
        paddle.enable_static()
        nlp_net = paddle.nn.GRU(input_size=2, hidden_size=3, num_layers=3)
        paddle.summary(nlp_net, (1, 2))
Example #24
    def test_summary_dtype(self):
        input_shape = (3, 1)
        net = paddle.nn.Embedding(10, 3, sparse=True)
        paddle.summary(net, input_shape, dtypes='int64')
Example #25
    def forward(self, x):
        fc = self._linear(x)
        return fc + self._offset


batch_size = 3
feature_size = 5
output_dim = 1

shape = (batch_size, feature_size, output_dim)

fc_layer = SimpleFcLayer(*shape)

# Inspect the model structure
paddle.summary(fc_layer, shape[0:2])  # prints the simple linear structure
# ---------------------------------------------------------------------------
# Layer (type)       Input Shape          Output Shape         Param #
# ===========================================================================
#   Linear-5           [[3, 5]]              [3, 1]               6
# ===========================================================================
# Total params: 6
# Trainable params: 6
# Non-trainable params: 0
# ---------------------------------------------------------------------------
# Input size (MB): 0.00
# Forward/backward pass size (MB): 0.00
# Params size (MB): 0.00
# Estimated Total Size (MB): 0.00
# ---------------------------------------------------------------------------
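The excerpt starts mid-class, so the constructor of SimpleFcLayer is missing. A hypothetical reconstruction consistent with the call SimpleFcLayer(*shape) and the 6-parameter summary (5 weights plus 1 bias from Linear(5, 1)); the _offset value is an assumption:

import paddle

class SimpleFcLayer(paddle.nn.Layer):
    def __init__(self, batch_size, feature_size, output_dim):
        super().__init__()
        self._linear = paddle.nn.Linear(feature_size, output_dim)
        # Arbitrary non-trainable constant; the real value is not shown in the excerpt.
        self._offset = paddle.to_tensor(float(batch_size))

    def forward(self, x):
        fc = self._linear(x)
        return fc + self._offset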
Example #26
def main():

    print("\n Paddlepaddle version: {}\n".format(paddle.__version__))

    args = parse_args()

    # Initialize the parallel environment
    # dist.init_parallel_env()

    # Load the datasets
    train_dataset = paddle.vision.datasets.MNIST(mode='train',
                                                 transform=ToTensor())
    val_dataset = paddle.vision.datasets.MNIST(mode='test',
                                               transform=ToTensor())

    train_loader = paddle.io.DataLoader(train_dataset,
                                        batch_size=args.batch_size,
                                        shuffle=True)
    test_loader = paddle.io.DataLoader(val_dataset, batch_size=args.batch_size)

    # Build the model
    mnist = Mnist()
    paddle.summary(net=mnist, input_size=(-1, 1, 28, 28))
    # Wrap with paddle.DataParallel
    # mnist = paddle.DataParallel(mnist)

    optim = paddle.optimizer.Adam(parameters=mnist.parameters())
    loss_fn = paddle.nn.CrossEntropyLoss()

    start_epoch = 0
    epochs = args.epochs

    if args.resume:
        # Load checkpoint.
        print('==> Resuming from checkpoint..')
        info = np.load('./weights/info.npy', allow_pickle=True).item()
        start_epoch = info['epoch'] + 1
        val_loss = info['loss']
        val_acc = info['acc']
        print('Epoch {}, validation loss is: {loss:.4f}, validation accuracy is {acc:.4f}\n'\
            .format(start_epoch,loss=val_loss,acc=val_acc))
        mnist_state_dict = paddle.load('./weights/mnist.pdparams')
        mnist.set_state_dict(mnist_state_dict)
        optim_state_dict = paddle.load('./weights/optim.pdopt')
        optim.set_state_dict(optim_state_dict)

    best_acc = 0.0
    for epoch in range(start_epoch, epochs):
        # Train
        mnist.train()
        loader = tqdm.tqdm(train_loader)
        for batch_id, (image, label) in enumerate(loader):
            predicts = mnist(image)
            loss = loss_fn(predicts, label)
            acc = paddle.metric.accuracy(predicts, label)
            loss.backward()
            optim.step()
            optim.clear_grad()
            description = (
                'Epoch {} (loss: {loss:.4f}, acc: {acc:.4f})'.format(
                    epoch, loss=loss.numpy().item(), acc=acc.numpy().item()))
            loader.set_description(description)

        # Evaluate
        mnist.eval()
        losses = 0.0
        accuracy = 0.0
        count = 0
        for batch_id, (image, label) in enumerate(test_loader):
            predicts = mnist(image)
            loss = loss_fn(predicts, label)
            acc = paddle.metric.accuracy(predicts, label)
            count += 1
            losses += loss.numpy().item()
            accuracy += acc.numpy().item()
        val_loss = losses / count
        val_acc = accuracy / count
        print("Testing: loss:{loss:.4f}, acc: {acc:.4f}".format(loss=val_loss,
                                                                acc=val_acc))

        # Save the evaluation results
        result = OrderedDict()
        result['timestamp'] = datetime.now()
        result['epoch'] = epoch
        result['loss'] = val_loss
        result['accuracy'] = val_acc

        result_dir = './result/'
        if not os.path.exists(result_dir) and result_dir != '':
            os.makedirs(result_dir)
        result_file = os.path.join(result_dir, 'valid_results.csv')
        write_heading = not os.path.exists(result_file)
        with open(result_file, mode='a') as out:
            if write_heading:
                out.write(",".join([str(k) for k, v in result.items()]) + '\n')
            out.write(",".join([str(v) for k, v in result.items()]) + '\n')

        # Save the parameters
        print('Saving checkpoint..')
        state = {'epoch': epoch, 'loss': val_loss, 'acc': val_acc}
        # Currently only the state_dict of a Layer or an Optimizer can be saved.
        np.save('./weights/info.npy', state, allow_pickle=True)  # save the training metadata
        paddle.save(mnist.state_dict(), './weights/mnist.pdparams')
        paddle.save(optim.state_dict(), './weights/optim.pdopt')

        # Save the model and parameters for deployment
        if val_acc > best_acc:
            best_acc = val_acc
            paddle.jit.save(
                mnist,
                './deploy/mnist',
                input_spec=[InputSpec(shape=[1, 1, 28, 28], dtype='float32')])
Example #27
def train(args):
    # Enable multi-GPU training
    dist.init_parallel_env()
    if dist.get_rank() == 0:
        shutil.rmtree('log', ignore_errors=True)
        # Logger
        writer = LogWriter(logdir='log')
    # Shape of the input data
    input_shape = eval(args.input_shape)
    # Load the data
    train_dataset = CustomDataset(args.train_list_path, model='train', spec_len=input_shape[3])
    train_loader = DataLoader(dataset=train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)

    test_dataset = CustomDataset(args.test_list_path, model='test', spec_len=input_shape[3])
    test_loader = DataLoader(dataset=test_dataset, batch_size=args.batch_size, num_workers=args.num_workers)

    # Build the model
    model = resnet34()
    metric_fc = ArcMarginProduct(feature_dim=512, class_dim=args.num_classes, easy_margin=args.easy_margin)
    if dist.get_rank() == 0:
        paddle.summary(model, input_size=input_shape)
    # Enable multi-GPU training
    model = paddle.DataParallel(model)
    metric_fc = paddle.DataParallel(metric_fc)

    # Piecewise learning rate
    boundaries = [10, 30, 70, 100]
    lr = [0.1 ** l * args.learning_rate for l in range(len(boundaries) + 1)]
    scheduler = paddle.optimizer.lr.PiecewiseDecay(boundaries=boundaries, values=lr, verbose=True)
    # Configure the optimizer
    optimizer = paddle.optimizer.Adam(parameters=model.parameters() + metric_fc.parameters(),
                                      learning_rate=scheduler,
                                      weight_decay=paddle.regularizer.L2Decay(1e-4))

    # Load the pretrained model
    if args.pretrained_model is not None:
        model_dict = model.state_dict()
        param_state_dict = paddle.load(os.path.join(args.pretrained_model, 'model.pdparams'))
        for name, weight in model_dict.items():
            if name in param_state_dict.keys():
                if weight.shape != list(param_state_dict[name].shape):
                    print('{} not used, shape {} unmatched with {} in model.'.
                            format(name, list(param_state_dict[name].shape), weight.shape))
                    param_state_dict.pop(name, None)
            else:
                print('Lack weight: {}'.format(name))
        model.set_dict(param_state_dict)
        print('Pretrained model parameters loaded successfully')

    # Resume training
    if args.resume is not None:
        model.set_state_dict(paddle.load(os.path.join(args.resume, 'model.pdparams')))
        optimizer.set_state_dict(paddle.load(os.path.join(args.resume, 'optimizer.pdopt')))
        print('Model and optimizer state restored successfully')

    # Loss function
    loss = FocalLoss(gamma=args.gamma)
    train_step = 0
    test_step = 0
    # Start training
    for epoch in range(args.num_epoch):
        loss_sum = []
        for batch_id, (spec_mag, label) in enumerate(train_loader()):
            feature = model(spec_mag)
            output = metric_fc(feature, label)
            # Compute the loss
            los = loss(output, label)
            loss_sum.append(los)
            los.backward()
            optimizer.step()
            optimizer.clear_grad()
            # Only one process prints when training on multiple GPUs
            if batch_id % 100 == 0 and dist.get_rank() == 0:
                print('[%s] Train epoch %d, batch_id: %d, loss: %f' % (
                    datetime.now(), epoch, batch_id, sum(loss_sum) / len(loss_sum)))
                writer.add_scalar('Train loss', los, train_step)
                train_step += 1
                loss_sum = []
        # Only one process runs evaluation and saves the model when training on multiple GPUs
        if dist.get_rank() == 0:
            acc = test(model, metric_fc, test_loader)
            print('[%s] Train epoch %d, accuracy: %f' % (datetime.now(), epoch, acc))
            writer.add_scalar('Test acc', acc, test_step)
            # Log the learning rate
            writer.add_scalar('Learning rate', scheduler.last_lr, epoch)
            test_step += 1
            save_model(args, model, optimizer)
        scheduler.step()