def train(self, epochs=2**16, batch_size=256, GSNR=1, verbose=1):
    # Assumes module-level imports: numpy as np, time.time,
    # keras.layers.Lambda, and the project's CommFunc, Data, metrics modules.
    # Scale the alpha-stable channel noise to the requested GSNR.
    scale_train = CommFunc.CalScale(GSNR, self.alpha_train, self.R)
    noise_layers = [Lambda(CommFunc.addNoise,
                           arguments={'sigma': scale_train,
                                      'alpha_train': self.alpha_train},
                           input_shape=(self.N,),
                           output_shape=self.return_output_shape,
                           name="noise")]
    noise = self.compose_model(noise_layers)
    noise.compile(optimizer=self.optimizer, loss=self.loss)

    # Full autoencoder: modulator -> noisy channel -> decoder.
    model_layers = self.modulator_layers + noise_layers + self.decoder_layers
    self.model = self.compose_model(model_layers)
    self.model.compile(optimizer=self.optimizer, loss=self.loss,
                       metrics=[metrics.BER])
    self.model.summary()

    # Generate training and validation data.
    X_train, Y_train = Data.genData(self.k, self.N, batch_size)
    X_val, Y_val = Data.genData(self.k, self.N, batch_size)

    t = time()
    history = self.model.fit(X_train, Y_train,
                             validation_data=(X_val, Y_val),
                             batch_size=batch_size, epochs=epochs,
                             verbose=verbose, shuffle=True)
    t = time() - t
    print("Time Used: {}s = {}min".format(t, t / 60))
    return self.model, history
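# A minimal training-driver sketch. `AutoEncoder` is a hypothetical name for
# the class these methods belong to, and its constructor arguments are
# assumptions; only the train() call itself mirrors the signature above.
def run_training():
    ae = AutoEncoder(k=8, N=16, alpha_train=1.8)
    model, history = ae.train(epochs=2**10, batch_size=256, GSNR=1, verbose=1)
    # Keras records validation loss per epoch when validation_data is given.
    print("final val loss:", history.history['val_loss'][-1])
    return ae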
def test(self, alpha, GSNR_low, GSNR_up, interval, test_batch, num_words):
    # Sweep `interval` GSNR points (per information bit, Eb) between
    # GSNR_low and GSNR_up; the equivalent per-symbol (Es) range sizes the
    # Gaussian-equivalent sigma grid. Assumes scipy.stats.levy_stable.
    SNR_dB_start_Eb = GSNR_low
    SNR_dB_stop_Eb = GSNR_up
    SNR_points = interval
    SNR_dB_start_Es = SNR_dB_start_Eb + 10 * np.log10(self.k / self.N)
    SNR_dB_stop_Es = SNR_dB_stop_Eb + 10 * np.log10(self.k / self.N)
    SNRs = np.linspace(SNR_dB_start_Eb, SNR_dB_stop_Eb, SNR_points)
    sigma_start = np.sqrt(1 / (2 * 10 ** (SNR_dB_start_Es / 10)))
    sigma_stop = np.sqrt(1 / (2 * 10 ** (SNR_dB_stop_Es / 10)))
    sigmas = np.linspace(sigma_start, sigma_stop, SNR_points)

    nb_errors = np.zeros(len(sigmas), dtype=int)
    nb_bits = np.zeros(len(sigmas), dtype=int)

    # Fixed per-batch seeds so every SNR point sees the same test words.
    seedrand = np.zeros(100, dtype=int)
    for sr in range(1, 100):
        seedrand[sr] = np.random.randint(0, 2 ** 14)

    for i in range(len(sigmas)):  # loop over SNR points
        scale = CommFunc.CalScale(SNRs[i], alpha, self.R)
        for ii in range(np.round(num_words / test_batch).astype(int)):
            # Source
            x_test, d_test = Data.genRanData(self.k, self.N, test_batch,
                                             seedrand[ii])
            # Modulator (BPSK)
            s_test = -2 * x_test + 1
            # Channel (symmetric alpha-stable noise)
            y_test = s_test + levy_stable.rvs(alpha, 0, 0, scale,
                                              (test_batch, self.N))
            # Decoder: evaluate() returns [loss, ..., bit errors] here
            nb_errors[i] += self.decoder.evaluate(y_test, d_test,
                                                  batch_size=test_batch,
                                                  verbose=2)[2]
            nb_bits[i] += d_test.size
    ber = np.float32(nb_errors / nb_bits)
    return ber
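# A sketch of driving test() and plotting the resulting BER curve. `ae` is a
# hypothetical instance of the surrounding class; matplotlib and the argument
# values are assumptions, illustrative only.
def plot_ber_sweep(ae, alpha=1.8, GSNR_low=0, GSNR_up=8, points=5):
    import matplotlib.pyplot as plt
    GSNRs = np.linspace(GSNR_low, GSNR_up, points)
    ber = ae.test(alpha, GSNR_low, GSNR_up, points,
                  test_batch=256, num_words=2**14)
    # BER curves are conventionally drawn on a log scale.
    plt.semilogy(GSNRs, ber, 'o-')
    plt.xlabel('GSNR (dB)')
    plt.ylabel('BER')
    plt.grid(True, which='both')
    plt.show()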
print("load file from {0}".format(current_path)) encoder_path = os.path.join(current_path, "encoder") attention_path = os.path.join(current_path, "attention") classifier_path = os.path.join(current_path, "classifier") encoder_rnn = torch.load(encoder_path) attention_model = torch.load(attention_path) classifier = torch.load(classifier_path) model_list.append([encoder_rnn, attention_model, classifier]) print(model_list) # ensemble here test = Data(filename="xml/test/") test_instances = test.features length = len(test_instances) batch_num = length / config.args.batch pred = [] true = [] def generate_batch_data(batch_data): input_index = [] input_mask = [] input_label = [] for batch in batch_data: input_index.append(batch['all_sequence'])