Example #1
def find(model_name: str, pretrained: bool, train_only_last_layer: bool,
         train_data_loader: torch.utils.data.DataLoader,
         min_learning_rate: float, max_learning_rate: float, num_iter: int,
         step_mode: str):
    """ Find learning rate based on Leslie Smith's approach
    and https://github.com/davidtvs/pytorch-lr-finder implementation.

    Arguments
    ---------
    model_name : str
        Model to train
    pretrained : bool
        True if the model should use pretrained weights, False otherwise
    train_only_last_layer : bool
        True if only the last layer is trained (the filename will record this)
    train_data_loader : torch.utils.data.DataLoader
        Data loader used for training
    min_learning_rate : float
        Minimum learning rate used for the search
    max_learning_rate : float
        Maximum learning rate used for the search
    num_iter : int
        Number of iterations over which the test is performed
    step_mode : str
        Mode used to step the learning rate during the search ("exp" or "linear")
    """
    model = get_model(model_name, train_only_last_layer, pretrained)
    criterion, optimizer = get_loss_and_optimizer(model, min_learning_rate)
    lr_finder = LRFinder(model, optimizer, criterion)
    lr_finder.range_test(train_loader=train_data_loader,
                         end_lr=max_learning_rate,
                         num_iter=num_iter,
                         step_mode=step_mode)
    lr_finder.plot()
    lr_finder.reset()
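A minimal invocation sketch for the helper above; the concrete model name and data loader are assumptions, not part of the original snippet:

# Hypothetical call; "resnet18" and train_loader are assumed to exist in the caller's scope.
find(model_name="resnet18",
     pretrained=True,
     train_only_last_layer=False,
     train_data_loader=train_loader,
     min_learning_rate=1e-7,
     max_learning_rate=10,
     num_iter=100,
     step_mode="exp")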
Example #2
def lf():
    args = get_args()
    model = select_model(args)
    optimizer = select_optimizer(args, model)
    train_transforms = get_transforms(args)

    train_params = {
        'num_workers': 2,
        'batch_size': args.batch_size,
        'shuffle': True
    }

    train_generator = datasets.ImageFolder(args.root_path + '/' + 'train',
                                           train_transforms)
    train, _ = torch.utils.data.random_split(train_generator, [48000, 12000])

    train_loader = DataLoader(train, pin_memory=True, **train_params)

    criterion = nn.CrossEntropyLoss(reduction='mean')
    lr_finder = LRFinder(model, optimizer, criterion, device="cuda")
    lr_finder.range_test(train_loader,
                         end_lr=10,
                         num_iter=300,
                         step_mode="exp")
    lr_finder.plot()
Example #3
def findLR(model, criterion, optimizer, trainloader):

    lr_finder = LRFinder(model, optimizer, criterion, device="cuda")
    lr_finder.range_test(trainloader, end_lr=100, num_iter=100)
    lr_finder.plot()  # to inspect the loss-learning rate graph
    lr_finder.reset()  # to reset the model and optimizer to their initial state
Example #4
 def findbestlr(self):
     criterion = nn.CrossEntropyLoss()
     optimizer = optim.SGD(self.model.parameters(), lr=0.01, momentum=0.95, weight_decay=0.0005)
     self.lr_finder = LRFinder(self.model, optimizer, criterion, device=self.device)
     self.lr_finder.range_test(self.trainloader, **self.config['range_test']['args'])
     self.lr_finder.plot() # to inspect the loss-learning rate graph
     self.lr_finder.reset() # to reset the model and optimizer to their initial state
     return self.lr_finder
Example #5
def plot_lr_(model, train_loader, test_loader, optimizer, criterion, device='cpu', step_mode="linear"):

    lr_finder = LRFinder(model, optimizer, criterion, device=device)

    lr_finder.range_test(train_loader, end_lr=100, num_iter=100)
    # lr_finder.range_test(train_loader, val_loader=test_loader, end_lr=1, num_iter=100, step_mode = step_mode)
    lr_finder.plot(log_lr=False)
    lr_finder.reset()
Example #6
 def find_lr(self, start_lr=1e-6, end_lr=1e2, accum_steps=1, opt='AdamW', wd=0):

     self.set_optimizer(opt=opt, lr=start_lr, wd=wd)
     dl = self.LRDataloader(self.data['train'])
     lr_finder = LRFinder(self.model, self.opt, self.loss_func,
                          device="cuda" if torch.cuda.is_available() else "cpu")
     lr_finder.range_test(dl, end_lr=end_lr, num_iter=100, accumulation_steps=accum_steps)
     lr_finder.plot()
     lr_finder.reset()
Example #7
 def lr_find(self, dl, optimizer, start_lr=1e-7, end_lr=1e-2, num_iter=200):
     lr_finder = LRFinder(self.model,
                          optimizer,
                          self.loss_fn,
                          device=self.device)
     lr_finder.range_test(dl,
                          start_lr=start_lr,
                          end_lr=end_lr,
                          num_iter=num_iter)
Example #8
def find_lr(model, opt, loss_func, device, dataLoader):
    lr_finder = LRFinder(model=model,
                         optimizer=opt,
                         criterion=loss_func,
                         device=device)
    lr_finder.range_test(dataLoader, end_lr=100, num_iter=200)
    lr_finder.plot()
    # reset model & opt to their original weights
    lr_finder.reset()
Example #9
def lrfinder(net, optimizer, criterion, trainloader, valloader):
    lr_finder = LRFinder(net, optimizer, criterion, device="cuda")
    lr_finder.range_test(trainloader,
                         val_loader=valloader,
                         end_lr=10,
                         num_iter=100,
                         step_mode="exponential")
    lr_finder.plot()
    lr_finder.reset()
Example #10
 def find_lr(self):
     lr_finder = LRFinder(self.model, self.optimizer, self.criterion,
                          self.device)
     lr_finder.range_test(self.data_loaders["train"],
                          end_lr=10,
                          num_iter=1000)
     lr_finder.plot()
     plt.savefig(self.save_folder + "/LRvsLoss.png")
     plt.close()
Example #11
def finder(model, optimizer, loss_fn, device):
    """
    :param model:
    :param optimizer: SGD with momentum
    :param loss_fn: any of the loss function NLL, CrossEntroyLoss, L1, L2 etc
    :param device: cuda | cpu
    :return:
    """
    return LRFinder(model=model, optimizer=optimizer, criterion=loss_fn, device=device)
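A minimal usage sketch for the factory above, running the range test on the returned object; the train_loader name is an assumption:

# Hypothetical usage; model, optimizer, loss_fn and train_loader are assumed to exist.
lr_finder = finder(model, optimizer, loss_fn, device="cuda")
lr_finder.range_test(train_loader, end_lr=10, num_iter=100)
lr_finder.plot()
lr_finder.reset()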
Example #12
 def lr_finder(self, end_lr=100, num_iter=100):
     lr_finder = LRFinder(self.model,
                          self.opt_fn,
                          self.loss_fn,
                          device=self.device)
     lr_finder.range_test(self.data.train_dl,
                          end_lr=end_lr,
                          num_iter=num_iter)
     lr_finder.plot()
     lr_finder.reset()
Example #13
def prepare_lr_finder(task, **kwargs):
    model = task.model
    optimizer = task.optimizer
    criterion = task.criterion
    config = {
        "device": kwargs.get("device", None),
        "memory_cache": kwargs.get("memory_cache", True),
        "cache_dir": kwargs.get("cache_dir", None),
    }
    lr_finder = LRFinder(model, optimizer, criterion, **config)
    return lr_finder
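A possible call site for prepare_lr_finder, assuming a task object exposing model, optimizer and criterion, and an existing train_loader:

# Hypothetical usage; task and train_loader are assumed to exist.
lr_finder = prepare_lr_finder(task, device="cuda", memory_cache=False, cache_dir="/tmp")
lr_finder.range_test(train_loader, end_lr=10, num_iter=100)
lr_finder.plot()
lr_finder.reset()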
Example #14
def main(params):
    start_time = time.time()

    # eventually uncomment this; leaving as-is in order to keep the same results as before to compare.
    set_random_seeds(params)

    if not os.path.exists(params['save_dir']):
        os.makedirs(params['save_dir'])

    with open(os.path.join(params['save_dir'], 'params.pkl'), 'wb') as f:
        pickle.dump(params, f)

    data_input = DataInput(params['data_params'])
    data_input.split_data()
    print('%d samples in the training data' %len(data_input.x_tr))
    # force identity for the first transform
    data_transformer = DataTransformerFactory({'transform_type': 'identity'}, params['random_seed']).manufacture_transformer()

    data_input.embed_data_and_fit_transformer(\
        data_transformer,
        cells_to_subsample=params['transform_params']['cells_to_subsample'],
        num_cells_for_transformer=params['transform_params']['num_cells_for_transformer'],
        use_labels_to_transform_data=params['transform_params']['use_labels_to_transform_data']
    )

    data_input.normalize_data()

    # gates aren't plotted because we're in n dimensions
    unused_cluster_gate_inits = init_gates(data_input, params)

    # data_input.convert_all_data_to_tensors()
    figscale = 8
    fig, axs = plt.subplots(nrows=len(unused_cluster_gate_inits), figsize=(figscale, len(unused_cluster_gate_inits)*figscale))

    print("initializing model")
    for gate, ax in zip(unused_cluster_gate_inits, axs):
        dataset = torch.utils.data.TensorDataset(torch.tensor(data_input.x_tr, dtype=torch.float),
                                                 torch.tensor(data_input.y_tr, dtype=torch.float))
        trainloader = torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=True)
        criterion = torch.nn.BCEWithLogitsLoss()
        model = SingleGateModel(params, gate)

        optimizer = torch.optim.Adam(model.parameters(), lr=1e-7, weight_decay=1e-2)

        print("initializing LR finder")
        lr_finder = LRFinder(model, optimizer, criterion)
        lr_finder.range_test(trainloader, end_lr=1e4, num_iter=100)
        lr_finder.plot(ax=ax)
        print("LR History:", lr_finder.history)
    plt.savefig(os.path.join(params['save_dir'], 'lr_find.png'))

    print('Complete main loop took %.4f seconds' %(time.time() - start_time))
    return
Example #15
File: Trainner.py  Project: BitManC/MyCv
    def lr_range_test(self, val_loss=False):
        lr_finder = LRFinder(self.model,
                             self.optimizer,
                             self.criterion,
                             device=self.device)

        val_loader = self.dl_valid if val_loss else None

        lr_finder.range_test(self.dl_train,
                             val_loader=val_loader,
                             end_lr=100,
                             num_iter=100,
                             step_mode="exp")

        lr_finder.plot()
        lr_finder.reset()
        self.latest_lr_finder_result = lr_finder
Example #16
def findLR(model, train_loader, test_loader, criterion, optimizer,
           num_iteration):

    # Add this line before running `LRFinder`
    #model, optimizer = amp.initialize(model, optimizer, opt_level='O1')

    lr_finder = LRFinder(model, optimizer, criterion, device="cuda")
    lr_finder.range_test(train_loader, end_lr=0.5,
                         num_iter=num_iteration)  # fast ai method
    #lr_finder.range_test(train_loader, val_loader=test_loader, end_lr=10, num_iter = num_iteration, step_mode="linear")
    lr_finder.plot(log_lr=False)
    lr_finder.reset()

    best_lr = lr_finder.history['lr'][lr_finder.history['loss'].index(
        lr_finder.best_loss)]

    return best_lr
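Taking the learning rate at the minimum of the loss curve, as above, can land close to the divergence point. A hedged alternative sketch, mirroring the steepest-gradient heuristic used in Example #24 below and assuming the same lr_finder.history dict:

# Sketch only: average the steepest-descent index and the loss-minimum index, as in Example #24.
import numpy as np
losses = np.array(lr_finder.history['loss'])
lrs = lr_finder.history['lr']
steep_idx = np.gradient(losses).argmin()
min_idx = losses.argmin()
chosen_lr = lrs[int((steep_idx + min_idx) / 2)]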
Example #17
    def find_lr(self):
        """finding suitable learning rate """
        model = self._model
        params = self.__set_lr()

        criterion = torch.nn.L1Loss(reduction='sum')
        optimizer = CaffeSGD(params,
                             lr=1e-8,
                             momentum=self.hparams.momentum,
                             weight_decay=self.hparams.wd)

        lr_finder = LRFinder(model, optimizer, criterion, device="cuda")
        trainloader = self.train_dataloader()
        lr_finder.range_test(trainloader,
                             start_lr=1e-7,
                             end_lr=1,
                             num_iter=500)
        lr_finder.plot()
Example #18
def learningrate_finder(upper_bound, lower_bound, dataset_directory, end_learning=100, num_iterations=100):
    hparams_tmp = Namespace(
        train_path=dataset_directory + '/train.txt',
        val_path=dataset_directory + '/val.txt',
        test_path=dataset_directory + '/test.txt',
        batch_size=16,
        warmup_steps=100,
        epochs=1,
        lr=upper_bound,
        accumulate_grad_batches=1,
    )
    module = TrainingModule(hparams_tmp)
    criterion = nn.CrossEntropyLoss()
    optimizer = AdamW(module.parameters(), lr=lower_bound)  # lower bound LR
    lr_finder = LRFinder(module, optimizer, criterion, device="cuda")
    lr_finder.range_test(module.train_dataloader(), end_lr=end_learning, num_iter=num_iterations,
                         accumulation_steps=hparams_tmp.accumulate_grad_batches)
    lr_finder.plot()
    # lr_finder.plot(show_lr=lr)  # show a chosen learning rate on the plot
    lr_finder.reset()
Example #19
def find_lr(model: torch.nn.Module, train_data: CircleDataset):
    # range test for finding learning rate as described in
    # https://towardsdatascience.com/finding-good-learning-rate-and-the-one-cycle-policy-7159fe1db5d6
    lr_image = 'learning_rate.png'
    train_loader = torch.utils.data.DataLoader(train_data,
                                               batch_size=batch_size,
                                               shuffle=True,
                                               pin_memory=False)
    criterion = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-7, weight_decay=1e-2)
    lr_finder = LRFinder(model, optimizer, criterion, device="cuda")
    logger.info("Running range test for learning rate")
    lr_finder.range_test(train_loader, end_lr=100, num_iter=100)
    fig, ax = plt.subplots()
    lr_finder.plot(ax=ax)  # to inspect the loss-learning rate graph
    logger.info(f"Saving image with learning rate plot to {lr_image}")
    fig.savefig(lr_image, dpi=fig.dpi)
    lr_finder.reset()  # to reset the model and optimizer to their initial state
Example #20
 def lr_find(self,
             dl,
             optimizer=None,
             start_lr=1e-7,
             end_lr=1e-2,
             num_iter=200):
     if optimizer is None:
         optimizer = torch.optim.SGD(self.model.parameters(),
                                     lr=1e-6,
                                     momentum=0.9)
     lr_finder = LRFinder(self.model,
                          optimizer,
                          self.loss_fn,
                          device=self.device)
     lr_finder.range_test(dl,
                          start_lr=start_lr,
                          end_lr=end_lr,
                          num_iter=num_iter)
     lr_finder.plot()
Example #21
 def lr_find(self, device="cuda"):
     """
     This method is a pretraining method that plots the result of the learning rate finder
     to find an optimal learning rate. See also 
     * https://github.com/davidtvs/pytorch-lr-finder
     * 
     """
     #         with torch.no_grad():
     lr_finder = LRFinder(self.model,
                          self.optimizer,
                          self.criterion,
                          device=device)
     lr_finder.range_test(self.train_dataloader(),
                          start_lr=0.0000001,
                          end_lr=10,
                          num_iter=100)
     lr_finder.plot()  # to inspect the loss-learning rate graph
     lr_finder.reset()  # to reset the model and optimizer to their initial state
Example #22
def run():
    device = torch.device(GPU_ID if torch.cuda.is_available() else "cpu")
    print(f'Using device {device}')

    hyperparameter = {
        'learning_rate': [1e-2, 1e-3, 3e-4, 1e-4, 3e-5, 1e-7],  # 1e-4
        'weight_decay': [0, 1e-3, 5e-4, 1e-4, 1e-5],  # 1e-4
        'num_epochs': 70,  # 100
        'weights': [0.0, 0.2, 0.4, 0.6, 0.8, 1.0],  # 0.6
        'optimizer': [optim.Adam, optim.SGD],  # Adam
        'image_size': 300,
        'crop_size': 299
    }
    loaders = prepare_dataset('retina', hyperparameter)

    #model: nn.Module = models.resnet50(pretrained=True)
    #num_ftrs = model.fc.in_features
    #model.fc = nn.Linear(num_ftrs, 2)
    model = ptm.inceptionv4(num_classes=1000, pretrained='imagenet')
    num_ft = model.last_linear.in_features
    model.last_linear = nn.Linear(num_ft, 2)

    children = list(model.features.children())
    for i, child in enumerate(children):
        if i < 0.0 * len(children):  # freeze the first fraction of the feature blocks (currently 0%)
            for param in child.parameters():
                param.requires_grad = False

    optimizer_ft = optim.Adam(filter(lambda p: p.requires_grad,
                                     model.parameters()),
                              lr=1e-7,
                              weight_decay=0)
    criterion = nn.CrossEntropyLoss()

    lr_finder = LRFinder(model, optimizer_ft, criterion, device=device)
    lr_finder.range_test(loaders[0], end_lr=0.1, num_iter=100, step_mode='exp')
    lr_finder.plot()
    lr_finder.reset()
    return 0
Example #23
    nn.Linear(in_features=30, out_features=1), nn.Sigmoid())

optim = torch.optim.Adam(net.parameters())
criterion = nn.BCELoss()


def init_weights(m):
    if type(m) == nn.Linear:
        nn.init.xavier_uniform_(m.weight)


net.apply(init_weights)

#%%
from torch_lr_finder import LRFinder
lrf = LRFinder(net, optim, criterion)
lrf.range_test(trainloader, start_lr=10**-5, end_lr=1)
lrf.plot()
lrf.reset()

#%%
n_epochs = 20
scheduler = torch.optim.lr_scheduler.CyclicLR(optim,
                                              10**-3,
                                              10**-2,
                                              cycle_momentum=False)
history = {'train': [], 'val': []}

for epoch in range(n_epochs):
    for x, y in trainloader:
        yhat = net(x)
Example #24
def lr_range_test(
    model,
    dataset,
    loss_func,
    optimizer="AdamW",
    batch_size=32,
    num_iter=None,
    skip_start=10,
    skip_end=10,
    start_lr=1e-7,
    end_lr=10,
    plot=False,
):
    if num_iter is None:
        num_iter = 100 + int(np.log10(10 + len(dataset)) * 50)
    n_train = min(len(dataset), num_iter * batch_size)
    n_val = min(int(0.3 * len(dataset)), 2 * num_iter)
    log.debug("num_iter: {}, n_val: {}".format(num_iter, n_val))
    split_idx = int(0.7 * len(dataset))
    idx_train = np.random.choice(split_idx, size=n_train)
    idx_val = np.random.choice(np.arange(split_idx, len(dataset)), size=n_val)
    train_data = Subset(dataset, idx_train)
    val_data = Subset(dataset, idx_val)
    lrtest_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)
    lrtest_loader_val = DataLoader(val_data, batch_size=1024, shuffle=True)
    lrtest_optimizer = create_optimizer(optimizer, model.parameters(),
                                        start_lr)
    with utils.HiddenPrints():
        lr_finder = LRFinder(model, lrtest_optimizer, loss_func)
        lr_finder.range_test(
            lrtest_loader,
            val_loader=lrtest_loader_val,
            end_lr=end_lr,
            num_iter=num_iter,
            smooth_f=0.2,  # re-consider if lr-rate varies a lot
        )
        lrs = lr_finder.history["lr"]
        losses = lr_finder.history["loss"]
    if skip_end == 0:
        lrs = lrs[skip_start:]
        losses = losses[skip_start:]
    else:
        lrs = lrs[skip_start:-skip_end]
        losses = losses[skip_start:-skip_end]
    if plot:
        with utils.HiddenPrints():
            ax, steepest_lr = lr_finder.plot()  # to inspect the loss-learning rate graph
    chosen_idx = None
    try:
        steep_idx = (np.gradient(np.array(losses))).argmin()
        min_idx = (np.array(losses)).argmin()
        chosen_idx = int((steep_idx + min_idx) / 2.0)
        # chosen_idx = min_idx
        log.debug("lr-range-test results: steep: {:.2E}, min: {:.2E}".format(
            lrs[steep_idx], lrs[min_idx]))
    except ValueError:
        log.error(
            "Failed to compute the gradients, there might not be enough points."
        )
    if chosen_idx is not None:
        max_lr = lrs[chosen_idx]
        log.info("learning rate range test selected lr: {:.2E}".format(max_lr))
    else:
        max_lr = 0.1
        log.error("lr range test failed. defaulting to lr: {}".format(max_lr))
    with utils.HiddenPrints():
        lr_finder.reset()  # to reset the model and optimizer to their initial state
    return max_lr
Example #25
# grad_clip = 0.001
# weight_decay = 1e-4
opt_func = torch.optim.Adam
criterion = F.cross_entropy
train_dl, val_dl = get_data_loader(subject='S2',
                                   train_batch_size=train_batch_size,
                                   val_batch_size=val_batch_size)

# model = WesadFeedForward(input_dim, output_dim)
model = WesadLSTM(input_dim=input_dim,
                  hidden_dim=input_dim,
                  output_dim=output_dim,
                  lstm_layers=lstm_layers)
# optimizer = opt_func(model.parameters(), lr=max_lr, weight_decay=weight_decay)
optimizer = opt_func(model.parameters(), lr=max_lr)
lr_finder = LRFinder(model, optimizer, criterion, device="cuda")
lr_finder.range_test(train_dl, end_lr=10000, num_iter=1000)
lr_finder.plot()  # to inspect the loss-learning rate graph
# lr_finder.reset() # to reset the model and optimizer to their initial state

# ## Running models - LOSO CV

# In[18]:

epochs = 20
lr = 1e-4

models = []
histories = []
val_histories = []
for subject in subjects:
Example #26
net = nn.Sequential(
    nn.Linear(12, 4),
    nn.ReLU(),
    nn.Linear(4, 1),
    #nn.ReLU(),
    #nn.Linear(5, 1)
)

net.apply(init_weights)

# net = net# .cuda()
opt = optim.Adam(net.parameters(), lr=10**-2.8)
criterion = nn.L1Loss()

from torch_lr_finder import LRFinder
lrf = LRFinder(net, opt, criterion)
lrf.range_test(train_loader=trainloader, start_lr=0.0001, end_lr=1)
lrf.plot()
lrf.reset()

#%%
train_losses = []
val_losses = []
scheduler = torch.optim.lr_scheduler.CyclicLR(
    opt,
    10**-3,
    10**-2,
    mode='exp_range',
    step_size_up=(xtrain.size(0) / BATCHSIZE) * 8,
    cycle_momentum=False)
Example #27
def train_fully_supervised(model,n_epochs,train_loader,val_loader,criterion,optimizer,scheduler,auto_lr,\
        save_folder,model_name,benchmark=False,save_all_ep=True, save_best=False, device='cpu',num_classes=21):
    """
        A complete training of fully supervised model. 
        save_folder : Path to save the model, the courb of losses,metric...
        benchmark : enable or disable backends.cudnn 
        save_all_ep : if True, the model is saved at each epoch in save_folder
        scheduler : if True, the model will apply a lr scheduler during training
        auto_lr : Auto lr finder 
    """
    torch.backends.cudnn.benchmark = benchmark

    if auto_lr:
        print('Auto finder for the Learning rate')
        lr_finder = LRFinder(model,
                             optimizer,
                             criterion,
                             memory_cache=False,
                             cache_dir='/tmp',
                             device=device)
        lr_finder.range_test(train_loader,
                             start_lr=10e-5,
                             end_lr=10,
                             num_iter=100)

    if scheduler:
        lr_scheduler = torch.optim.lr_scheduler.LambdaLR(
            optimizer, lambda x: (1 - x / (len(train_loader) * n_epochs))**0.9)

    loss_test = []
    loss_train = []
    iou_train = []
    iou_test = []
    accuracy_train = []
    accuracy_test = []
    model.to(device)
    for ep in range(n_epochs):
        print("EPOCH", ep)
        model.train()
        state = step_train_supervised(model,train_loader=train_loader,criterion=criterion,\
            optimizer=optimizer,device=device,num_classes=num_classes)
        iou = state.metrics['mean IoU']
        acc = state.metrics['accuracy']
        loss = state.metrics['CE Loss']
        loss_train.append(loss)
        iou_train.append(iou)
        accuracy_train.append(acc)
        print('TRAIN - EP:', ep, 'iou:', iou, 'Accuracy:', acc, 'Loss CE',
              loss)
        if scheduler:
            lr_scheduler.step()
        #Eval model
        model.eval()
        with torch.no_grad():
            state = eval_model(model,
                               val_loader,
                               device=device,
                               num_classes=num_classes)
            iou = state.metrics['mean IoU']
            acc = state.metrics['accuracy']
            loss = state.metrics['CE Loss']
            loss_test.append(loss)
            iou_test.append(iou)
            accuracy_test.append(acc)
            print('TEST - EP:', ep, 'iou:', iou, 'Accuracy:', acc, 'Loss CE',
                  loss)

        ## Save model
        U.save_model(model,
                     save_all_ep,
                     save_best,
                     save_folder,
                     model_name,
                     ep=ep,
                     iou=iou,
                     iou_test=iou_test)

    U.save_curves(path=save_folder,loss_train=loss_train,iou_train=iou_train,accuracy_train=accuracy_train\
                                ,loss_test=loss_test,iou_test=iou_test,accuracy_test=accuracy_test)
Example #28
    def find_lr(self):
        from torch_lr_finder import LRFinder

        logger.info('finding the best learning rate')

        cfg = self.config

        if self.tsai_mode:
            import sodium.tsai_model as module_arch
        else:
            import sodium.model.model as module_arch

        # create a model instance
        model = get_instance(module_arch, 'arch', cfg)

        # setup the model with the device
        model, device = setup_device(model, cfg['target_device'])

        param_groups = setup_param_groups(model, cfg['optimizer'])
        optimizer = get_instance(module_optimizer, 'optimizer', cfg,
                                 param_groups)

        criterion = getattr(module_loss, cfg['criterion'])()

        self.lr_finder = LRFinder(model, optimizer, criterion, device="cuda")

        lr_finder_epochs = cfg['lr_finder']['epochs']
        logger.info(f'Running LR-Test for {lr_finder_epochs} epochs')
        # my method
        self.lr_finder.range_test(self.trainer.train_loader,
                                  start_lr=1e-3,
                                  end_lr=1,
                                  num_iter=len(self.trainer.test_loader) *
                                  lr_finder_epochs,
                                  step_mode='linear')

        # leslie smith method
        # self.lr_finder.range_test(self.trainer.train_loader, val_loader = self.trainer.test_loader,
        # end_lr=1, num_iter=len(self.trainer.train_loader), step_mode='linear')

        # fast ai method
        # self.lr_finder.range_test(
        #     self.trainer.train_loader, end_lr=100, num_iter=len(self.trainer.train_loader))

        self.best_lr = self.lr_finder.history['lr'][
            self.lr_finder.history['loss'].index(self.lr_finder.best_loss)]

        sorted_lrs = [
            x for _, x in sorted(
                zip(self.lr_finder.history['loss'],
                    self.lr_finder.history['lr']))
        ]

        logger.info(f'sorted lrs : {sorted_lrs[:10]}')

        logger.info(f'found the best lr : {self.best_lr}')

        logger.info('plotting lr_finder')

        plt.style.use("dark_background")
        self.lr_finder.plot()

        # reset the model and the optimizer
        self.lr_finder.reset()
        plt.show()

        del model, optimizer, criterion
Example #29
def lr_finder(model, optimizer, criterion, device):
    return LRFinder(model=model,
                    optimizer=optimizer,
                    criterion=criterion,
                    device=device)
Example #30
def Interpol(N, neurons, iter, fun=0, a=1, b=1):

    datasamp = datagen(N, neurons, fun, a, b, legendre)
    val_inputs, val_labels = datasamp.get_val()
    train_inputs, train_labels = datasamp.get_train()
    train_loader = DataLoader(dataset=datasamp,
                              num_workers=0)  # Initiate the data and labels

    class LockedCybenko(torch.nn.Module):  # Cybenko with inner weight=1 and bias=-x[i]
        def __init__(self):
            super(LockedCybenko, self).__init__()
            self.fc1 = torch.nn.Linear(1, neurons, bias=True)
            self.fc1.weight.data = torch.ones(neurons).reshape(-1, 1)
            self.fc1.bias.data = -torch.linspace(-1, 1, neurons).reshape(
                1, -1).float()
            self.fc1.weight.requires_grad_(False)
            self.fc1.bias.requires_grad_(False)
            self.fc2 = torch.nn.Linear(neurons, 1, bias=False)
            self.relu = torch.nn.ReLU()

        def forward(self, x):
            x = self.relu(self.fc1(x))
            return self.fc2(x)

    class SemilockedCybenko(torch.nn.Module):  # Cybenko with inner weight=-1, one node less and free bias
        def __init__(self):
            super(SemilockedCybenko, self).__init__()
            self.fc1 = torch.nn.Linear(1, neurons, bias=True)
            self.fc1.weight.data = torch.ones(neurons - 1).reshape(-1, 1)
            self.fc1.weight.requires_grad_(False)
            self.fc1.bias.requires_grad_(True)
            self.fc2 = torch.nn.Linear(neurons, 1, bias=False)
            self.relu = torch.nn.Sigmoid()

        def forward(self, x):
            x = self.relu(self.fc1(x))
            return self.fc2(x)

    class UnlockedCybenko(torch.nn.Module):  # Cybenko with free inner weight or bias
        def __init__(self):
            super(UnlockedCybenko, self).__init__()
            self.fc1 = torch.nn.Linear(1, neurons, bias=True)
            self.fc2 = torch.nn.Linear(neurons, 1, bias=True)
            self.relu = torch.nn.Sigmoid()

        def forward(self, x):
            x = self.relu(self.fc1(x))
            return self.fc2(x)

    class Network(torch.nn.Module):  # Arbitrary network
        def __init__(self):
            super(Network, self).__init__()
            self.fc1 = torch.nn.Linear(1, neurons, bias=True)
            self.fc2 = torch.nn.Linear(neurons, 2 * neurons, bias=True)
            self.fc3 = torch.nn.Linear(2 * neurons, 1, bias=True)
            self.relu = torch.nn.ReLU()

        def forward(self, x):
            x = self.relu(self.fc1(x))
            x = self.relu(self.fc2(x))
            return self.fc3(x)

    model = Network()
    criterion = torch.nn.MSELoss(reduction="sum")
    optimizer = torch.optim.SGD(model.parameters(), lr=0.005)

    lr_finder = LRFinder(model, optimizer, criterion)
    lr_finder.range_test(train_loader,
                         start_lr=0.001,
                         end_lr=1.5,
                         num_iter=1000)
    lr_finder.reset()  # to reset the model and optimizer to their initial state
    learning = lr_finder.history.get('lr')[np.argmin(
        lr_finder.history.get('loss'))]

    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

    EL2Val = []
    EL2train = []
    ELinf = []
    EL2 = []  # L2 integral between f and u_teta

    for epoch in range(iter):
        x = []
        ytrue = []
        ypred = []
        for i, (inputs, labels) in enumerate(train_loader):
            y_pred = model(inputs)
            loss = criterion(y_pred, labels)
            x.append(inputs.data.numpy())
            ytrue.append(labels.data.numpy())
            ypred.append(y_pred.data.numpy())

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        def modelonx(x):
            return model(
                torch.tensor(x.reshape(-1, 1).tolist(),
                             requires_grad=False)).data.numpy().reshape(1, -1)

        def L2error(x):
            return (modelonx(x) - np.array(truef(x, fun)).reshape(1, -1))**2

        ELinf.append(max(abs(val_labels - model(val_inputs))))
        EL2.append(quadrature(L2error, -1, 1)[0][0])
        EL2Val.append(criterion(val_labels, model(val_inputs)))
        EL2train.append((criterion(train_labels, model(train_inputs))))
        print(
            f'Epoch: {epoch} L2 Error on training : {EL2train[-1]:.6e} | L2 Error on validation : {EL2Val[-1]:.6e} | L2 on [-1,1] : {EL2[-1]:.6e}'
        )

        if epoch % 5 == 0:

            fig, ax = pl.subplots(nrows=1, ncols=2)
            plotrange = np.linspace(a - 0.1, b + 0.1, 100)
            """ Function and Model Plot"""
            ax[0].scatter(val_inputs.data.numpy(),
                          val_labels.data.numpy(),
                          c='red',
                          s=15)
            ax[0].scatter(train_inputs, train_labels, s=15)
            ax[0].plot(
                plotrange,
                model(torch.linspace(a - 0.1, b + 0.1,
                                     100).reshape(-1, 1)).data.numpy(), 'r')
            """ # Code qui permet d'afficher la fonction linéaire par morceau
            alpha = model.fc2.weight.data.numpy()[0]
            X = -model.fc1.bias.data.numpy()[0]
            ReLU = lambda t : np.where(t<=0,0,t)
            ax[0].plot(xx,alpha[0]*ReLU(xx-X[0])+alpha[1]*ReLU(xx-X[1])+alpha[2]*ReLU(xx-X[2])+alpha[3]*ReLU(xx-X[3])+alpha[4]*ReLU(xx-X[4])+alpha[5]*ReLU(xx-X[5]))
            """

            ax[0].plot(plotrange, truef(plotrange, fun), c='blue')
            #ax[0].plot(np.linspace(a-0.1,b+0.1,100),np.polyval(np.polyfit(train_inputs.data.numpy().reshape(1,-1)[0],train_labels.data.numpy().reshape(1,-1)[0],10),np.linspace(a-0.1,b+0.1,100)),c='green')
            if fun == 7:
                ax[0].plot(plotrange, maclaurin(plotrange, 50), c='green')
                ax[0].set_ylim(-0.1, 1.1)
            """ Error Plot """
            ax[1].semilogy(range(epoch + 1), EL2Val, color='red')
            ax[1].semilogy(range(epoch + 1), EL2train, color='blue')
            #ax[1].semilogy(range(epoch+1),EL2,color='magenta')
            #ax[1].semilogy(range(epoch+1),ELinf,color='black')
            pl.show()

    return model