# --- One training epoch: single model -------------------------------------
# Reset per-epoch accumulator variables so stale data from a previous pass
# cannot leak into this one.
alb_list = None
raa_list = None
vza_list = None
sza_list = None
toz_list = None
rad = None
albwf = None
o3wf = None

# Samples per mini-batch; used only for the progress report below.
# NOTE(review): assumed to match the DataLoader's batch_size — confirm.
BATCH_SIZE = 128

# Forward -> MSRE loss -> backward -> optimizer step, once per batch.
for i, (features, radiances) in enumerate(train_loader):
    print(i, features.shape, radiances.shape)
    optimizer.zero_grad()
    outputs = model(features)
    loss = msre(outputs, radiances)
    loss.backward()
    optimizer.step()
    # np.append copies the whole array on every call (O(n^2) over the
    # epoch); kept because downstream code expects a numpy array here.
    train_losses = np.append(train_losses, loss.item())
    # Progress report once every 10 batches (equivalent to the original
    # `(i * 128) % (128 * 10) == 0`).
    if i % 10 == 0:
        print(f'{i * BATCH_SIZE} / ', len(train_loader) * BATCH_SIZE,
              time.time() - timestamp, datetime.datetime.now())
        print(loss.item())
        timestamp = time.time()
# --- One training epoch: two models over a split spectrum ------------------
# Reset per-epoch accumulator variables so stale data from a previous pass
# cannot leak into this one.
vza_list = None
sza_list = None
toz_list = None
rad = None
albwf = None
o3wf = None

# Column at which the radiance spectrum is divided between the two models:
# model_s300 learns channels [0, 660), model_l300 learns [660, end).
SPLIT_COL = 660

for i, (features, radiances) in enumerate(train_loader):
    print(i, features.shape, radiances.shape)
    # Two independent models/optimizers; clear both gradients up front.
    optimizer_l300.zero_grad()
    optimizer_s300.zero_grad()
    outputs_s300 = model_s300(features)
    outputs_l300 = model_l300(features)
    # Each model is trained only against its own half of the spectrum.
    loss_s300 = msre(outputs_s300, radiances[:, :SPLIT_COL])
    loss_l300 = msre(outputs_l300, radiances[:, SPLIT_COL:])
    # The two losses come from disjoint graphs, so the backward passes
    # and optimizer steps do not interact.
    loss_s300.backward()
    loss_l300.backward()
    optimizer_l300.step()
    optimizer_s300.step()
    # np.append copies the whole array on every call (O(n^2) over the
    # epoch); kept because downstream code expects numpy arrays here.
    train_losses_s300 = np.append(train_losses_s300, loss_s300.item())
    train_losses_l300 = np.append(train_losses_l300, loss_l300.item())