Example No. 1
0
        config.label_smooth_factor = 0.0
    loss = CrossEntropy(smooth_factor=config.label_smooth_factor,
                        num_classes=config.class_num)
    if args_opt.do_train:
        dataset = create_dataset(dataset_path=args_opt.dataset_path,
                                 do_train=True,
                                 repeat_num=epoch_size,
                                 batch_size=config.batch_size)
        step_size = dataset.get_dataset_size()
        loss_scale = FixedLossScaleManager(config.loss_scale,
                                           drop_overflow_update=False)

        # learning rate strategy with cosine
        lr = Tensor(
            warmup_cosine_annealing_lr(config.lr, step_size,
                                       config.warmup_epochs,
                                       config.epoch_size))
        opt = Momentum(filter(lambda x: x.requires_grad,
                              net.get_parameters()), lr, config.momentum,
                       config.weight_decay, config.loss_scale)
        model = Model(net,
                      loss_fn=loss,
                      optimizer=opt,
                      amp_level='O2',
                      keep_batchnorm_fp32=False,
                      loss_scale_manager=loss_scale,
                      metrics={'acc'})
        time_cb = TimeMonitor(data_size=step_size)
        loss_cb = LossMonitor()
        cb = [time_cb, loss_cb]
        if config.save_checkpoint:
Example No. 2
0
    # NOTE(review): fragment is truncated — the enclosing function header is
    # not visible in this excerpt.
    # Disable label smoothing entirely when the config flag is off.
    if not config.label_smooth:
        config.label_smooth_factor = 0.0
    loss = CrossEntropy(smooth_factor=config.label_smooth_factor,
                        num_classes=config.class_num)
    if args_opt.do_train:
        # Training dataset; repeat_num ties dataset repetition to epoch_size.
        dataset = create_dataset(dataset_path=args_opt.dataset_path,
                                 do_train=True,
                                 repeat_num=epoch_size,
                                 batch_size=config.batch_size)
        step_size = dataset.get_dataset_size()

        # Fixed loss scaling; updates are still applied on overflow steps.
        loss_scale = FixedLossScaleManager(config.loss_scale,
                                           drop_overflow_update=False)
        # NOTE(review): 0.035 (base lr) and 50 (presumably max epochs) are
        # hard-coded here, unlike the sibling examples which read config.lr /
        # config.epoch_size — confirm these magic numbers are intentional.
        lr = Tensor(
            warmup_cosine_annealing_lr(0.035, step_size, config.warmup_epochs,
                                       50, config.T_max, config.eta_min))
        # THOR (second-order) optimizer: besides the trainable parameters it
        # receives the matrix_A / matrix_G / spatial_norm parameter groups
        # selected by name from the network.
        opt = THOR(
            filter(lambda x: x.requires_grad, net.get_parameters()), lr,
            config.momentum, damping, config.frequency,
            filter(lambda x: 'matrix_A' in x.name, net.get_parameters()),
            filter(lambda x: 'matrix_G' in x.name, net.get_parameters()),
            filter(lambda x: 'spatial_norm' in x.name, net.get_parameters()),
            config.weight_decay, config.loss_scale)

        # O2 mixed precision, batchnorm not kept in fp32.
        # Fragment truncated here: the Model(...) call continues beyond the
        # visible range (note the trailing comma after metrics).
        model = Model(net,
                      loss_fn=loss,
                      optimizer=opt,
                      amp_level='O2',
                      loss_scale_manager=loss_scale,
                      keep_batchnorm_fp32=False,
                      metrics={'acc'},
Example No. 3
0
    # NOTE(review): fragment is truncated — the enclosing function header and
    # the construction of `loss` (referenced below) are not visible here.
    if args_opt.do_train:
        # Training dataset; repeat_num ties dataset repetition to epoch_size.
        dataset = create_dataset(dataset_path=args_opt.dataset_path,
                                 do_train=True,
                                 repeat_num=epoch_size,
                                 batch_size=config.batch_size)
        step_size = dataset.get_dataset_size()
        # Fixed loss scaling; updates are still applied on overflow steps.
        loss_scale = FixedLossScaleManager(config.loss_scale,
                                           drop_overflow_update=False)
        # Optionally resume from a pretrained checkpoint before training.
        if args_opt.pre_trained:
            param_dict = load_checkpoint(args_opt.pre_trained)
            load_param_into_net(net, param_dict)

        # learning rate strategy with cosine
        # NOTE(review): 120 (presumably max epochs) is hard-coded while the
        # warmup offset uses config.pretrain_epoch_size — confirm the magic
        # number is intentional.
        lr = Tensor(
            warmup_cosine_annealing_lr(config.lr, step_size,
                                       config.warmup_epochs, 120,
                                       config.pretrain_epoch_size * step_size))
        # Momentum optimizer over trainable parameters only.
        opt = Momentum(filter(lambda x: x.requires_grad,
                              net.get_parameters()), lr, config.momentum,
                       config.weight_decay, config.loss_scale)
        # O2 mixed precision, batchnorm not kept in fp32.
        model = Model(net,
                      loss_fn=loss,
                      optimizer=opt,
                      amp_level='O2',
                      keep_batchnorm_fp32=False,
                      loss_scale_manager=loss_scale,
                      metrics={'acc'})
        # Per-step timing and loss logging callbacks.
        time_cb = TimeMonitor(data_size=step_size)
        loss_cb = LossMonitor()
        cb = [time_cb, loss_cb]
        # Fragment truncated here: checkpoint-callback setup continues
        # beyond the visible range.
        if config.save_checkpoint: