Example #1
    # (fragment begins mid-function: xentropy and ss_loss are computed in the
    # elided lines above; the branch condition is recovered from the matching
    # check before the return below)
    if args.ss_loss:
        loss = xentropy + args.ss_loss_reg * ss_loss
    else:
        ss_loss = 0.
        loss = xentropy

    loss.backward()
    optimizer.step()
    if args.ss_loss:
        return {'xentropy': xentropy.item(), 'ss_loss': ss_loss.item()}
    return {'xentropy': xentropy.item()}
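

def run_training(train_loader, num_epochs):
    # A minimal driver sketch, not in the original source. The step above is
    # referred to here as train_step; its real name and signature are cut off
    # in this fragment, so every name below is an assumption for illustration.
    for epoch in range(num_epochs):
        for batch in train_loader:
            optimizer.zero_grad()      # clear stale gradients before backward()
            stats = train_step(batch)  # forward + backward() + optimizer.step()
            print(f"epoch {epoch}: xentropy={stats['xentropy']:.4f}")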


if __name__ == "__main__":
    print(f'CUDA is available: {torch.cuda.is_available()}')
    print(args.solvers)
    fix_seeds(args.seed)

    if args.torch_dtype == 'float64':
        dtype = torch.float64
    elif args.torch_dtype == 'float32':
        dtype = torch.float32
    else:
        raise ValueError('torch_dtype should be either float64 or float32')
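    # A more extensible alternative (a sketch, not the original code):
    #   dtype = {'float64': torch.float64, 'float32': torch.float32}[args.torch_dtype]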

    device = torch.device(
        'cuda:' + str(args.gpu) if torch.cuda.is_available() else 'cpu')

    ########### Create train / val solvers
    print(args.solvers)
    train_solvers = [
        create_solver(*solver_params, dtype=dtype, device=device)
        for solver_params in args.solvers  # iterable assumed from the prints above
    ]
Example #2
def resolve_lr_bounds():
    # (fragment begins mid-function: resolve_lr_bounds is a hypothetical name,
    # and the first condition is recovered from the elif chain below)
    if wandb.config.max_lr is not None and wandb.config.base_lr is not None:
        max_lr, base_lr = wandb.config.max_lr, wandb.config.base_lr
    elif wandb.config.max_lr is not None:
        max_lr = wandb.config.max_lr
        base_lr = wandb.config.max_lr / wandb.config.max_lr_reduction
    elif wandb.config.base_lr is not None:
        base_lr = wandb.config.base_lr
        max_lr = base_lr
    else:
        raise ValueError('Either max_lr or base_lr should be defined in the W&B config')
    return max_lr, base_lr
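

def build_cyclic_scheduler(optimizer):
    # A minimal sketch (an assumption, not shown in the original fragment):
    # the (max_lr, base_lr) pair resolved above is the natural input to a
    # cyclical schedule such as torch.optim.lr_scheduler.CyclicLR.
    max_lr, base_lr = resolve_lr_bounds()
    return torch.optim.lr_scheduler.CyclicLR(
        optimizer, base_lr=base_lr, max_lr=max_lr,
        step_size_up=2000,       # half-cycle length, in optimizer steps
        cycle_momentum=False)    # keep momentum fixed so any optimizer works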

if __name__ == "__main__":
    wandb.init(project="metasolver-demo", anonymous="allow")
    wandb.config.update(args)

    fix_seeds(wandb.config.seed)

    # Path to save checkpoints locally: <wandb.config.save_dir>/<entity>/<project>/<run_id> [Julia style]
    makedirs(wandb.config.save_dir)
    makedirs(os.path.join(wandb.config.save_dir, wandb.run.path))

    if wandb.config.torch_dtype == 'float64':
        dtype = torch.float64
    elif wandb.config.torch_dtype == 'float32':
        dtype = torch.float32
    else:
        raise ValueError('torch_dtype should be either float64 or float32')

    device = torch.device('cuda:' + str(wandb.config.gpu) if torch.cuda.is_available() else 'cpu')
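
Both fragments call a fix_seeds helper whose definition is cut off by the excerpt. A minimal sketch of what such a helper commonly looks like, offered as an assumption rather than as the original implementation:

import random

import numpy as np
import torch


def fix_seeds(seed):
    # Seed every RNG the training code is likely to touch so that runs
    # are repeatable across restarts.
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)           # CPU (and current CUDA device) generators
    torch.cuda.manual_seed_all(seed)  # explicit for multi-GPU setups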