    def example(self):
        x = self.generate(1).detach().cpu()
        x = np.array(x)
        x = iflatten_complex_data(x)
        x = ifft_data(x)[0]

        return x
Example #2
    def example(self):
        # Generate an example with numpy array data type
        x = self.generate(1).detach().cpu()
        x = np.array(x)
        x = iflatten_complex_data(x)
        x = ifft_data(x)[0]

        return x
Example #3
    def example(self):
        # Generate an example with numpy array data type
        x = self.generate(1).detach().cpu()
        x = np.array(x)
        x = istandardize_data_with(x, self.mean, self.std)
        x = iflatten_complex_data(x)
        x = ifft_data(x)[0]

        return x
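For reference, a minimal, hypothetical sketch of how the output of these example() variants might be inspected; the instance name net and the use of matplotlib are assumptions, not part of the original examples:

import numpy as np
import matplotlib.pyplot as plt

# hypothetical usage: 'net' is assumed to be a trained GAN instance exposing example()
wave = net.example()            # one generated clip, mapped back to the time domain
plt.plot(np.absolute(wave))     # magnitude envelope of the (possibly complex) clip
plt.show()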
Example #4
    def example(self):
        # Generate an example with numpy array data type
        global win
        x = self.generate(1).detach().cpu()
        x = np.array(x)
        x = iflatten_complex_data(x)
        x = ifft_data(x)
        # x = iwindow(x, win, 0.6)[0]

        return x
Example #5
    def example(self):
        r = get_random_example(1, config.TRIM_LENGTH)
        e = fft_data(r)
        e = flatten_complex_data(e)
        if self.in_cpu:
            e = torch.tensor(e, dtype=torch.float32)
        else:
            e = torch.tensor(e, device=self.device, dtype=torch.float32)

        e = self.forward(e).detach().cpu()
        e = np.array(e)

        e = iflatten_complex_data(e)
        e = ifft_data(e)[0]
        r = np.array(r[0])

        return e - r
    def train(self,
              train_set,
              batch_size,
              num_epoche,
              g_eta,
              d_eta,
              show=True):
        print('Start training | batch_size:{a} | g_eta:{b} | d_eta:{c}'.format(
            a=batch_size, b=g_eta, c=d_eta))
        global origin
        self.to(self.device)
        self.in_cpu = False
        train_set = torch.tensor(train_set,
                                 dtype=torch.float,
                                 device=self.device)

        g_optimizer = optim.Adam(self.generator.parameters(), lr=g_eta)
        d_optimizer = optim.Adam(self.discriminator.parameters(), lr=d_eta)

        g_target = torch.ones(batch_size, 1).to(self.device)
        d_target = torch.cat(
            [torch.zeros(batch_size, 1),
             torch.ones(batch_size, 1)], 0).to(self.device)

        N = train_set.size()[0]
        N = N - N % batch_size

        best_asds_score = 0
        best_cca_score = 0
        best_scca_score = 0

        for epoch in range(num_epoche):
            tic = time.time()

            perm = torch.randperm(N)

            steps = 0
            for i in range(0, N, batch_size):
                # optimize generator
                g_optimizer.zero_grad()
                noise = self.noise(batch_size)
                g_out = self.generator(noise)
                gd_out = self.discriminator(g_out)

                g_loss = self.discriminator.criterion(gd_out, g_target)
                g_loss.backward()
                g_optimizer.step()

                # optimize discriminator
                d_optimizer.zero_grad()
                indices = perm[i:i + batch_size]
                d_input = torch.cat([g_out.detach(), train_set[indices]], 0)
                d_out = self.discriminator.forward(d_input)

                d_loss = self.discriminator.criterion(d_out, d_target)
                d_loss.backward()
                # skip the discriminator update when it is already far ahead of the generator
                if g_loss < 3 * d_loss:
                    d_optimizer.step()

                steps += 1
                if show:
                    if steps % 100 == 0:
                        # record training losses
                        g_r = float(g_loss.detach().cpu())
                        d_r = float(d_loss.detach().cpu())
                        self.losses.append([g_r, d_r])

                        # record model score
                        with torch.no_grad():
                            fake = self.generate(1).cpu()

                        # Average spectra diff score
                        fake = iflatten_complex_data(fake)
                        example = self.example()

                        score_asds = average_spectra_diff_score(fake[0])
                        try:
                            score_scca = average_spectra_cca_score(fake[0])
                        except Exception:
                            score_scca = 0
                        try:
                            score_cca = average_cca_score(
                                example, random.choices(origin, k=10))
                        except Exception:
                            score_cca = 0

                        self.scores.append([score_asds, score_cca, score_scca])

                        if score_asds > best_asds_score:
                            best_asds_score = score_asds
                            if epoch > 0:
                                torch.save(self.state_dict(), 'BEST_ASDS')

                        if score_cca > best_cca_score:
                            best_cca_score = score_cca
                            if epoch > 0:
                                torch.save(self.state_dict(), 'BEST_CCA')

                        if score_scca > best_scca_score:
                            best_scca_score = score_scca
                            if epoch > 0:
                                torch.save(self.state_dict(), 'BEST_SCCA')

                        report(loss_title='Training Loss Curve',
                               losses=self.losses,
                               loss_labels=['Generator', 'Discriminator'],
                               score_title='Model Score Curve',
                               scores=self.scores,
                               score_labels=['ASD', 'CCA', 'SCCA'],
                               interval=100,
                               example=example)

            dt = time.time() - tic
            print('epoch ' + str(epoch) + ' finished! Time usage: ' + str(dt))

            # if show is True:
            #     with torch.no_grad():
            #         y = self.generate(1).to(torch.device('cpu'))
            #     y = iflatten_complex_data(y)
            #     diff = average_spectra_diff(y[0])
            #     print('The Spectra Difference: ' + str(diff))

        self.to(torch.device('cpu'))
        self.in_cpu = True

        last_path = os.path.join(
            config.DATA_PATH, 'Trained_Models', 'Complex_Fully_Connected',
            time_stamp() + '|LAST' + '|BC:' + str(batch_size) + '|g_eta:' +
            str(g_eta) + '|d_eta:' + str(d_eta))
        torch.save(self.state_dict(), last_path)

        # Store the model with best ASD score
        try:
            self.load_state_dict(torch.load('BEST_ASDS'))
            os.remove('BEST_ASDS')

            best_asd_path = os.path.join(
                config.DATA_PATH, 'Trained_Models', 'Complex_Fully_Connected',
                time_stamp() + '|ASDS' + '|BC:' + str(batch_size) + '|g_eta:' +
                str(g_eta) + '|d_eta:' + str(d_eta))
            torch.save(self.state_dict(), best_asd_path)
        except FileNotFoundError:  # no BEST_ASDS checkpoint was saved
            pass

        # Store the model with best CCA score
        try:
            self.load_state_dict(torch.load('BEST_CCA'))
            os.remove('BEST_CCA')

            best_cca_path = os.path.join(
                config.DATA_PATH, 'Trained_Models', 'Complex_Fully_Connected',
                time_stamp() + '|CCA' + '|BC:' + str(batch_size) + '|g_eta:' +
                str(g_eta) + '|d_eta:' + str(d_eta))
            torch.save(self.state_dict(), best_cca_path)
        except FileNotFoundError:  # no BEST_CCA checkpoint was saved
            pass

        # Store the model with best SCCA score
        try:
            self.load_state_dict(torch.load('BEST_SCCA'))
            os.remove('BEST_SCCA')

            best_scca_path = os.path.join(
                config.DATA_PATH, 'Trained_Models', 'Complex_Fully_Connected',
                time_stamp() + '|SCCA' + '|BC:' + str(batch_size) + '|g_eta:' +
                str(g_eta) + '|d_eta:' + str(d_eta))
            torch.save(self.state_dict(), best_scca_path)
        except FileNotFoundError:  # no BEST_SCCA checkpoint was saved
            pass

        return
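A minimal, hypothetical usage sketch of the train() entry point above; the constructor argument and the load_training_set helper are assumptions (only the args=(6,) call in Example #7 hints at the constructor signature):

import numpy as np

net = Complex_Fully_Connected_GAN(6)      # assumed constructor signature
train_set = load_training_set()           # hypothetical helper: (N, D) array of flattened FFT spectra
net.train(train_set,
          batch_size=64,
          num_epoche=100,
          g_eta=1e-4,
          d_eta=1e-4,
          show=True)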
Example #7
# %%
import numpy as np
from play import play_one_video_from
from model_complex_fullconnected import Complex_Fully_Connected_GAN
from data_processor import iflatten_complex_data
from data_processor import ifft_data
import matplotlib.pyplot as plt
# %%
net = play_one_video_from(Complex_Fully_Connected_GAN,
                          '/home/tai/UG4_Project/training_system/BEST_ASD',
                          args=(6, ))
# %%
spec = net.generate(1)
# %%
data = iflatten_complex_data(net.generate(1).detach())
# %%
spec
# %%
plt.plot(np.absolute(data[0]))
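# %%
# Hedged extra cell (not in the original notebook): map the generated spectrum back to the
# time domain the same way the example() methods above do; this assumes ifft_data returns a
# list of complex arrays, as those methods suggest.
wave = ifft_data(data)[0]
plt.plot(np.real(wave))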
# %%
import numpy as np
import matplotlib.pyplot as plt

# %%
x = np.linspace(0, 100, 400)
y1 = x * 3 - 300
y2 = x * (-3) + 200
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 5))
ax1.plot(x, y1, label='legend')
ax1.plot(x, y2, label='haha')