test_size = 0.2    # 20% of the full sample held out for testing
val_size = 0.25    # 25% of the training split held out for validation
n_train = int(N_sample * (1 - test_size) * (1 - val_size))
epochs = int(n_iter / (n_train / batch_size))

f = open("Trainning_INFO_Regression_58k_QQBAL.txt", "w+")
f.write('INFO: Epochs:{} -- Batch size:{} \n'.format(epochs, batch_size))

start = time.time()

# Load QSO and QSO_BAL spectra with their redshifts (regression targets)
X, y = Load_Files('truth_DR12Q.fits', 'data_dr12.fits', N_sample,
                  ['QSO', 'QSO_BAL'], classification=False)
train_loader, test_loader, val_loader = Data_Loader(X, y, N_sample, batch_size,
                                                    test_size, val_size)

"""
# Quick visual check of the first 100 spectra and their redshifts
for i in range(100):
    x = np.linspace(300, 1000, 443)
    print('Redshift:{}'.format(y[i]))
    plt.plot(x, X[i, :])
    plt.xlabel('Wavelength')
    plt.ylabel('Renormalized Flux [Arb. units]')
    plt.show()
"""


# In[13]:
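
# A minimal sketch (not part of the original pipeline) of the 60/20/20 split
# implied by test_size=0.2 and val_size=0.25: 20% of the sample is held out
# for testing, then 25% of the remaining 80% (i.e. 20% of the total) for
# validation. Data_Loader is project-specific; this assumes a standard
# scikit-learn split wrapped in PyTorch DataLoaders, and is kept inside a
# string block, like the plotting example above, so it is not executed.
"""
from sklearn.model_selection import train_test_split
from torch.utils.data import TensorDataset, DataLoader
import torch

def example_split(X, y, batch_size, test_size=0.2, val_size=0.25):
    # First split: 80% train+val vs. 20% test
    X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=test_size)
    # Second split: 25% of the remaining 80% goes to validation
    X_tr, X_va, y_tr, y_va = train_test_split(X_tr, y_tr, test_size=val_size)

    def make_loader(a, b):
        ds = TensorDataset(torch.as_tensor(a, dtype=torch.float32),
                           torch.as_tensor(b, dtype=torch.float32))
        return DataLoader(ds, batch_size=batch_size, shuffle=True)

    return (make_loader(X_tr, y_tr), make_loader(X_te, y_te),
            make_loader(X_va, y_va))
"""
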
n_iter = 10000
test_size = 0.2    # 20% of the full sample held out for testing
val_size = 0.25    # 25% of the training split held out for validation
n_train = int(N_sample * (1 - test_size) * (1 - val_size))
epochs = int(n_iter / (n_train / batch_size))

fi = open("Trainning_INFO_80k.txt", "w+")
fi.write('INFO: Epochs:{} -- Batch size:{} \n'.format(epochs, batch_size))

start = time.time()

# Load spectra with class labels for the classification task (no class cut)
X, y = Load_Files('truth_DR12Q.fits', 'data_dr12.fits', N_sample, None,
                  classification=True)
train_loader, test_loader, val_loader, train_s, test_s, val_s = Data_Loader(
    X, y, N_sample, batch_size, test_size, val_size, classification=True)

# CNN for classification
learning_rate = 0.1


class Net_C(nn.Module):
    def __init__(self):
        super(Net_C, self).__init__()
        # 1D convolutions over the spectrum: kernel size 15, stride 2
        self.conv1 = nn.Conv1d(1, 64, 15, stride=2)
        self.conv2 = nn.Conv1d(64, 128, 15, stride=2)