Code Example #1
File: main.py Project: ThanThoai/Action_Recognition
def main(config):
    if config.model == 'c3d':
        model, params = C3D(config)
    elif config.model == 'convlstm':
        model, params = ConvLSTM(config)
    elif config.model == 'densenet':
        model, params = densenet(config)
    elif config.model == 'densenet_lean':
        model, params = densenet_lean(config)
    elif config.model == 'resnext':
        model, params = resnext(config)
    else:
        model, params = densenet_lean(config)

    dataset = config.dataset
    sample_size = config.sample_size
    stride = config.stride
    sample_duration = config.sample_duration

    cv = config.num_cv

    # crop_method = GroupRandomScaleCenterCrop(size=sample_size)
    crop_method = MultiScaleRandomCrop(config.scales, config.sample_size[0])
    # norm = Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])

    norm = Normalize([114.7748, 107.7354, 99.475], [1, 1, 1])
    # spatial_transform = Compose(
    #     [crop_method,
    #      GroupRandomHorizontalFlip(),
    #      ToTensor(1), norm])
    spatial_transform = Compose([
        RandomHorizontalFlip(), crop_method,
        ToTensor(config.norm_value), norm
    ])
    # temporal_transform = RandomCrop(size=sample_duration, stride=stride)
    temporal_transform = TemporalRandomCrop(config.sample_duration,
                                            config.downsample)
    target_transform = Label()

    train_batch = config.train_batch
    train_data = RWF2000('/content/RWF_2000/frames/',
                         g_path + '/RWF-2000.json', 'training',
                         spatial_transform, temporal_transform,
                         target_transform, dataset)
    train_loader = DataLoader(train_data,
                              batch_size=train_batch,
                              shuffle=True,
                              num_workers=4,
                              pin_memory=True)

    # NOTE: the validation pipeline below normalizes with ImageNet mean/std
    # after ToTensor(), while the training pipeline above scales by
    # config.norm_value and subtracts per-channel pixel means; the two are
    # not equivalent.
    crop_method = GroupScaleCenterCrop(size=sample_size)
    norm = Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    spatial_transform = Compose([crop_method, ToTensor(), norm])
    temporal_transform = CenterCrop(size=sample_duration, stride=stride)
    target_transform = Label()

    val_batch = config.val_batch

    val_data = RWF2000('/content/RWF_2000/frames/', g_path + '/RWF-2000.json',
                       'validation', spatial_transform, temporal_transform,
                       target_transform, dataset)
    val_loader = DataLoader(val_data,
                            batch_size=val_batch,
                            shuffle=False,
                            num_workers=4,
                            pin_memory=True)

    if not os.path.exists('{}/pth'.format(config.output)):
        os.mkdir('{}/pth'.format(config.output))
    if not os.path.exists('{}/log'.format(config.output)):
        os.mkdir('{}/log'.format(config.output))

    batch_log = Log(
        '{}/log/{}_fps{}_{}_batch{}.log'.format(
            config.output,
            config.model,
            sample_duration,
            dataset,
            cv,
        ), ['epoch', 'batch', 'iter', 'loss', 'acc', 'lr'])
    epoch_log = Log(
        '{}/log/{}_fps{}_{}_epoch{}.log'.format(config.output, config.model,
                                                sample_duration, dataset, cv),
        ['epoch', 'loss', 'acc', 'lr'])
    val_log = Log(
        '{}/log/{}_fps{}_{}_val{}.log'.format(config.output, config.model,
                                              sample_duration, dataset, cv),
        ['epoch', 'loss', 'acc'])

    # NOTE: `device` is not defined in this listing; the project presumably
    # defines it at module level (cf. config.device used further down).
    criterion = nn.CrossEntropyLoss().to(device)
    # criterion = nn.BCELoss().to(device)

    learning_rate = config.learning_rate
    momentum = config.momentum
    weight_decay = config.weight_decay

    optimizer = torch.optim.SGD(params=params,
                                lr=learning_rate,
                                momentum=momentum,
                                weight_decay=weight_decay,
                                dampening=0,
                                nesterov=False)

    # optimizer = torch.optim.Adam(params=params, lr = learning_rate, weight_decay= weight_decay)

    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, verbose=True, factor=config.factor, min_lr=config.min_lr)

    acc_baseline = config.acc_baseline
    loss_baseline = 1

    for p in range(1, config.num_prune):
        # NOTE: since range starts at 1, this guard is always true, so the
        # first iteration expects a prune_0.pth checkpoint to already exist.
        if p > 0:
            model = torch.load('{}/pth/prune_{}.pth'.format(
                config.output, p - 1))
        print(f"Prune {p}/{config.num_prune}")
        # count parameters before and after pruning (renamed from `p` to
        # avoid shadowing the prune-round index)
        n_params = sum(np.prod(w.size()) for w in model.parameters())
        print("Number of Parameters: %.1fM" % (n_params / 1e6))
        model = prune_model(model)
        n_params = sum(np.prod(w.size()) for w in model.parameters())
        print("Number of Parameters: %.1fM" % (n_params / 1e6))
        model.to(config.device)
        acc_baseline = 0
        for i in range(5):
            train(i, train_loader, model, criterion, optimizer, device,
                  batch_log, epoch_log)
            val_loss, val_acc = val(i, val_loader, model, criterion, device,
                                    val_log)
            scheduler.step(val_loss)
            # NOTE: acc_baseline and loss_baseline are never updated after a
            # save, so any later epoch that beats the initial baseline
            # overwrites prune_{p}.pth with its own weights.
            if val_acc > acc_baseline or (val_acc >= acc_baseline
                                          and val_loss < loss_baseline):
                # torch.save(
                # model.state_dict(),
                # '{}/pth/prune_{}_{}_fps{}_{}{}_{}_{:.4f}_{:.6f}.pth'.format(
                #     config.output, p, config.model, sample_duration, dataset, cv, i, val_acc,
                #     val_loss))
                torch.save(model,
                           '{}/pth/prune_{}.pth'.format(config.output, p))
Code Example #2
    # (listing truncated above: these are the trailing arguments of the
    #  DataLoader call that builds val_dataloader)
        shuffle=False,
        num_workers=cfg["num_workers"],
    )
    dataloaders = {}
    dataloaders["train"] = train_dataloader
    dataloaders["val"] = val_dataloader

    # Define Model
    device = torch.device("cuda")

    model = ConvLSTM(cfg, bias=True, batch_first=True)

    if cfg["resume"]:
        model.load_state_dict(torch.load(cfg["weights"]))

    model.to(device)

    if cfg["optimizer"] == "sgd":
        optimizer = optim.SGD(
            model.parameters(),
            lr=cfg["lr"],
            momentum=0.9,
            weight_decay=cfg["weight_decay"],
        )
    else:
        optimizer = RAdam(
            model.parameters(), lr=cfg["lr"], weight_decay=cfg["weight_decay"]
        )

    criterion = nn.BCELoss()
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)
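
As with example #1, this snippet is configuration-driven. The sketch below lists every cfg key the visible code reads; the keys are taken from the snippet, the values are illustrative assumptions, and the truncated portion above presumably reads additional keys (e.g. a batch size).

# A minimal sketch of the cfg dict this snippet reads; values are assumed.
cfg = {
    "num_workers": 4,
    "resume": False,            # if True, weights are loaded from cfg["weights"]
    "weights": "convlstm.pth",  # hypothetical checkpoint path
    "optimizer": "sgd",         # "sgd" -> SGD + momentum; anything else -> RAdam
    "lr": 1e-3,
    "weight_decay": 1e-4,
}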