Example #1
            # Training loop: iterate over the mini-batches of this epoch
            process_bar = ProcessBar(batches_num)
            for i, train_batch in enumerate(train_batches):
                # Unpack the batch; unless training on the I channel only,
                # interleave the I and Q channels first
                batch_X, batch_Y = train_batch
                if not kIOnly:
                    batch_X = ZipIQ(batch_X)
                # Reshape to (batch, timesteps, features) for the LSTM
                batch_X = batch_X.reshape(batch_X.shape[0], lstm_model.TIMESTEPS, -1)

                # Every 5th iteration, also fetch the loss and write a
                # training summary for TensorBoard
                if iteration % 5 == 0:
                    _, train_summary, current_loss = \
                        sess.run([optimizer, merged, loss], feed_dict={lstm_model.X: batch_X, lstm_model.Y: batch_Y})
                    train_writer.add_summary(train_summary, iteration)

                    # Evaluate accuracy on a random batch of test samples
                    test_X, test_Y = data_manager.get_random_test_samples(kBatchSize)
                    if not kIOnly:
                        test_X = ZipIQ(test_X)
                    test_X = test_X.reshape(test_X.shape[0], lstm_model.TIMESTEPS, -1)
                    test_summary, current_accuracy = \
                        sess.run([merged, accuracy], feed_dict={lstm_model.X: test_X, lstm_model.Y: test_Y})
                    test_writer.add_summary(test_summary, iteration)

                    process_bar.SkipMsg(
                        '({}/{}) loss: {}, accuracy: {}'.format(
                            i, batches_num, current_loss, current_accuracy),
                        logger)
                else:
                    # Plain optimization step without any logging
                    _ = sess.run([optimizer], feed_dict={lstm_model.X: batch_X, lstm_model.Y: batch_Y})

                iteration += 1
                process_bar.UpdateBar(i + 1)
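
Example #1 is a TensorFlow 1.x snippet and assumes a graph, session, and summary writers built elsewhere. The sketch below is a minimal, hypothetical version of that setup: the layer sizes, TIMESTEPS/FEATURES/NUM_CLASSES constants, and log paths are illustrative assumptions, and the placeholders and ops are defined as module-level names here, whereas the loop above reaches them through an lstm_model wrapper.

import tensorflow as tf  # TensorFlow 1.x (or tf.compat.v1) API

# Hypothetical dimensions; the real values come from the data set.
TIMESTEPS, FEATURES, NUM_CLASSES = 128, 2, 11

X = tf.placeholder(tf.float32, [None, TIMESTEPS, FEATURES], name='X')
Y = tf.placeholder(tf.float32, [None, NUM_CLASSES], name='Y')

# A single LSTM layer followed by a dense softmax classifier.
cell = tf.nn.rnn_cell.LSTMCell(64)
outputs, _ = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)
logits = tf.layers.dense(outputs[:, -1, :], NUM_CLASSES)

loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(labels=Y, logits=logits))
optimizer = tf.train.AdamOptimizer(1e-3).minimize(loss)

correct = tf.equal(tf.argmax(logits, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

# Scalar summaries merged into the single 'merged' op the loop runs.
tf.summary.scalar('loss', loss)
tf.summary.scalar('accuracy', accuracy)
merged = tf.summary.merge_all()

sess = tf.Session()
sess.run(tf.global_variables_initializer())
train_writer = tf.summary.FileWriter('logs/train', sess.graph)
test_writer = tf.summary.FileWriter('logs/test')

With these in place, the loop runs as-is once the wrapper's X, Y, and TIMESTEPS attributes point at the objects above.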
Example #2
     writer.add_scalars('accuracy', {'train': train_accuracy},
                        global_step=iteration)
     # * Reset the accumulated loss and the tester
     sum_loss = 0
     tester.restart()
     # * Output
     process_bar.SkipMsg(
         'Train: ({}/{}) loss: {}, accuracy: {}'.format(
             batch_ID, batches_num - 1, train_loss, train_accuracy),
         logger)
 # ! At a test-logging iteration, evaluate on part of the test set
 if iteration % (K.TrainLogInterval * K.TestLogMultiplier) == 0:
     process_bar.SkipMsg('/*******Now Test the Model*******/',
                         logger)
     # * Get test data
     test_X, test_Y = data_manager.get_random_test_samples(
         K.TestSamplesNum)
     # Test in eval+no_grad mode
     with torch.no_grad():
         net.eval()
         test_loss, test_accuracy = TestSamples(
             test_X,
             test_Y,
             net,
             tester,
             I_only=K.IOnly,
             batch_size=K.TestBatchSize,
             SNR_generate=K.random_SNR_generate
             if K.IsNoise else None)
     net.train()
     writer.add_scalar('test/loss',
                       test_loss,
                       global_step=iteration)
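
The TestSamples helper in Example #2 is project-specific and not shown, but the eval/no_grad pattern it wraps is standard PyTorch. The sketch below is a hypothetical stand-in: the net interface, integer class labels, and simple sequential batching are assumptions, and the SNR_generate noise injection of the original is omitted.

import torch
import torch.nn.functional as F

def evaluate(test_X, test_Y, net, batch_size=64):
    # Hypothetical stand-in for TestSamples: mean loss and accuracy
    # over the given samples, computed in eval/no_grad mode.
    device = next(net.parameters()).device
    total_loss, total_correct = 0.0, 0
    net.eval()
    with torch.no_grad():
        for start in range(0, len(test_X), batch_size):
            x = torch.as_tensor(test_X[start:start + batch_size],
                                dtype=torch.float32, device=device)
            y = torch.as_tensor(test_Y[start:start + batch_size],
                                dtype=torch.long, device=device)
            logits = net(x)
            # Sum per batch so the final division gives a per-sample mean
            total_loss += F.cross_entropy(logits, y, reduction='sum').item()
            total_correct += (logits.argmax(dim=1) == y).sum().item()
    net.train()
    n = len(test_X)
    return total_loss / n, total_correct / n

Switching back to net.train() before returning matches what Example #2 does explicitly after its no_grad block, so dropout and batch-norm layers behave correctly when training resumes.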