Code Example #1
File: test.py  Project: kjjjjjjj/vibration-sae
def runCV():
    config.new_experiment()
    start = timeit.default_timer()  # start timing the cross-validation run

    model = AE(n_input=1, n_hidden=config.n_hidden, n_output=1, n_layers=1)
    dataset = Dataset()
    data = dataset[:, [config.CHANNEL]]
    target = dataset[:, [config.CHANNEL]]
    mean = cv(model,
              data,
              target,
              temperature=config.temperature,
              weight_decay=config.weight_decay,
              learning_rate=config.learning_rate,
              sparsity=config.sparsity,
              sparsity_penalty=config.sparsity_penalty,
              n_epochs=config.MAX_TRAINING_EPOCHS,
              n_splits=config.CV_N_SPLITS,
              seed=config.SEED,
              batch_size=config.batch_size,
              shuffle=False)

    stop = timeit.default_timer()  # stop timing
    print(stop - start)  # elapsed seconds
    # save_result(mean)
    print('OK')
Code Example #2
File: bayesopt.py  Project: kjjjjjjj/vibration-sae
def objective(params):
    print('-------Hyper-parameters-------')
    print(params)
    print('-------Hyper-parameters-------')
    config.new_BO_run()
    save_txt(string_of_hyperparameters(params), 'hyperparameters.txt')

    model = AE(n_input=1, n_hidden=params[0], n_output=1, n_layers=1)
    # timestamp = config.TIMESTAMP_CHANNEL
    channel = config.CHANNEL  # 'acc2__'
    data = dataset[:, [channel]]
    target = dataset[:, [channel]]
    score = cv(model,
               data,
               target,
               temperature=params[1],
               weight_decay=params[2],
               learning_rate=params[3],
               sparsity=params[4],
               sparsity_penalty=params[5],
               n_epochs=config.MAX_TRAINING_EPOCHS,
               n_splits=config.CV_N_SPLITS,
               seed=config.SEED,
               batch_size=int(params[10]),  # params[6:10] are not referenced in this snippet
               shuffle=False)
    return score
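
The objective above returns a cross-validation score to be minimized by a Bayesian optimizer. A minimal sketch of wiring it into scikit-optimize's gp_minimize, assuming that library is used (the ranges below are illustrative, not the project's actual search space; indices 6-9 stand in for hyperparameters not referenced in this snippet):

from skopt import gp_minimize
from skopt.space import Integer, Real

space = [
    Integer(8, 256, name='n_hidden'),                             # params[0]
    Real(0.1, 10.0, name='temperature'),                          # params[1]
    Real(1e-6, 1e-2, prior='log-uniform', name='weight_decay'),   # params[2]
    Real(1e-5, 1e-1, prior='log-uniform', name='learning_rate'),  # params[3]
    Real(0.01, 0.5, name='sparsity'),                             # params[4]
    Real(1e-3, 10.0, name='sparsity_penalty'),                    # params[5]
]
space += [Real(0.0, 1.0, name='unused_{}'.format(i)) for i in range(6, 10)]  # placeholders
space += [Integer(16, 512, name='batch_size')]                    # params[10]

result = gp_minimize(objective, space, n_calls=30, random_state=0)
print('best score: {:.5f}'.format(result.fun))
print('best params: {}'.format(result.x))
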
Code Example #3
    config = Configuration()
    config.parse_commandline()
    config.new_experiment()

    X = Dataset()[config.CHANNEL]  # already segmented and overlapped!

    tscv = TimeSeriesSplit(n_splits=5)
    for train_index, test_index in tscv.split(X):
        print('train_index: {}'.format(train_index))
        print('test_index: {}'.format(test_index))

        ae = Model(
            AE(
                n_input=1,
                n_hidden=config.n_hidden,
                n_output=1,
                n_layers=config.n_layers,
            ))

        monitor = BaseMonitor(ae, temperature, learning_rate, weight_decay)

        X_train = X[train_index]
        X_train_zipped = torch.utils.data.DataLoader(
            ZipDataset(X_train[:, :-1], X_train[:, 1:]),
            batch_size=config.batch_size,
            shuffle=False)
        monitor.fit(X_train)

        X_test = X[test_index]
        X_test_zipped = torch.utils.data.DataLoader(
            ZipDataset(X_test[:, :-1], X_test[:, 1:]),
            batch_size=config.batch_size,
            shuffle=False)
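
ZipDataset is defined elsewhere in the project; judging from the calls ZipDataset(X_train[:, :-1], X_train[:, 1:]), it pairs each input window with its one-step-shifted target. A minimal sketch of such a dataset, offered as an assumption rather than the project's actual implementation:

from torch.utils.data import Dataset as TorchDataset

class ZipDataset(TorchDataset):
    """Pair two equally long sequences element-wise, like zip()."""

    def __init__(self, inputs, targets):
        assert len(inputs) == len(targets)
        self.inputs = inputs
        self.targets = targets

    def __len__(self):
        return len(self.inputs)

    def __getitem__(self, i):
        return self.inputs[i], self.targets[i]
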
Code Example #4
def main(args, ts, logPath):

    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(args.seed)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    dataset = MNIST(
        root='../data', train=True, transform=transforms.ToTensor(),
        download=True)
    data_loader = DataLoader(
        dataset=dataset, batch_size=args.batch_size, shuffle=True)

    def loss_fn(recon_x, x):
        BCE = torch.nn.functional.binary_cross_entropy(
            recon_x.view(-1, 28*28), x.view(-1, 28*28), reduction='sum')

        return BCE / x.size(0)

    ae = AE(
        encoder_layer_sizes=args.encoder_layer_sizes,
        latent_size=args.latent_size,
        decoder_layer_sizes=args.decoder_layer_sizes,
        conditional=args.conditional,
        num_labels=10 if args.conditional else 0).to(device)

    optimizer = torch.optim.Adam(ae.parameters(),
                                 lr=args.learning_rate)

    logs = defaultdict(list)

    for epoch in range(args.epochs):

        tracker_epoch = defaultdict(lambda: defaultdict(dict))

        for iteration, (x, y) in enumerate(data_loader):

            x, y = x.to(device), y.to(device)

            if args.conditional:
                recon_x, z = ae(x, y)
            else:
                recon_x, z = ae(x)

            for i, yi in enumerate(y):
                idx = len(tracker_epoch)
                tracker_epoch[idx]['x'] = z[i, 0].item()
                if z.shape[1] != 1:
                    tracker_epoch[idx]['y'] = z[i, 1].item()
                else:
                    tracker_epoch[idx]['y'] = 0
                tracker_epoch[idx]['label'] = yi.item()

            loss = loss_fn(recon_x, x)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            logs['loss'].append(loss.item())

            if iteration % args.print_every == 0 or iteration == len(data_loader)-1:
                logContent = "Epoch {:02d}/{:02d} Batch {:04d}/{:d}, Loss {:9.4f}".format(
                    epoch, args.epochs, iteration, len(data_loader)-1, loss.item())
                print(logContent)
                with open(logPath, 'a') as log:
                    log.write(logContent + "\n")

                if args.conditional:
                    c = torch.arange(0, 10).long().unsqueeze(1)
                    x = ae.inference(n=c.size(0), c=c)
                else:
                    x = ae.inference(n=10)

                plt.figure(figsize=(5, 10))
                for p in range(10):
                    plt.subplot(5, 2, p+1)
                    if args.conditional:
                        plt.text(
                            0, 0, "c={:d}".format(c[p].item()), color='black',
                            backgroundcolor='white', fontsize=8)
                    plt.imshow(x[p].view(28, 28).cpu().data.numpy())  # move to CPU before converting to numpy
                    plt.axis('off')

                os.makedirs(os.path.join(args.fig_root, str(ts)), exist_ok=True)

                plt.savefig(
                    os.path.join(args.fig_root, str(ts),
                                 "E{:d}I{:d}.png".format(epoch, iteration)),
                    dpi=300)
                plt.clf()
                plt.close('all')

        df = pd.DataFrame.from_dict(tracker_epoch, orient='index')

        g = sns.lmplot(
            x='x', y='y', hue='label', data=df.groupby('label').head(100),
            fit_reg=False, legend=True, scatter_kws={"s": 20}, markers=".")

        g.savefig(os.path.join(
            args.fig_root, str(ts), "E{:d}-Dist.png".format(epoch)),
            dpi=300)
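
main() above reads a fixed set of attributes from args. A hypothetical argument parser consistent with those attributes (default values and the log file name are illustrative assumptions, not the project's):

import argparse
import time

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument('--epochs', type=int, default=10)
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--learning_rate', type=float, default=1e-3)
    parser.add_argument('--encoder_layer_sizes', type=int, nargs='+', default=[784, 256])
    parser.add_argument('--decoder_layer_sizes', type=int, nargs='+', default=[256, 784])
    parser.add_argument('--latent_size', type=int, default=2)
    parser.add_argument('--conditional', action='store_true')
    parser.add_argument('--print_every', type=int, default=100)
    parser.add_argument('--fig_root', type=str, default='figs')
    args = parser.parse_args()

    ts = time.strftime('%Y%m%d-%H%M%S')  # timestamp for the figure directory
    main(args, ts, logPath='train.log')  # 'train.log' is an assumed file name
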
Code Example #5
    def __init__(self):
        tf.random.set_seed(SEED)
        np.random.seed(SEED)
        # Get model
        self.net = AE()
Code Example #6
class TrainAE:
    def __init__(self):
        tf.random.set_seed(SEED)
        np.random.seed(SEED)
        #Get Model
        self.net = AE()

    def train_batch(self, batch):
        with tf.GradientTape(persistent=True) as t:
            t.watch(self.net.trainables)
            recon_loss = self.net.calc_loss(batch)
        #Get Gradients
        ae_grads = t.gradient(recon_loss, self.net.trainables)

        #Apply Grads
        self.net.update(ae_grads)

        return recon_loss

    def train(self, data, num_epochs, batch_size):
        #Unpack Data
        x_train = data['x_train']
        y_train = data['y_train']

        x_val = data['x_val']
        y_val = data['y_val']

        train_losses = []
        val_losses = []

        batch_print = 1000
        print("Start Training For {} Epochs".format(num_epochs))
        for ep in range(num_epochs):
            #Shuffle
            np.random.shuffle(x_train)
            batch_iters = int(x_train.shape[0] / batch_size)

            batch_loss = 0
            print("\nEpoch {}".format(ep + 1))
            for i in range(batch_iters):
                #run batch
                cur_idx = i * batch_size
                batch = x_train[cur_idx:cur_idx + batch_size]
                # idx = np.random.randint(0, x_train.shape[0], batch_size)
                # batch = x_train[idx]

                train_recon = float(self.train_batch(batch))  # to Python float for accumulation and printing
                batch_loss += train_recon
                #self.net.apply_grads(grads)
                if (i + 1) % batch_print == 0:
                    print('Batch loss {} recon:{:.5f}'.format(
                        i + 1, train_recon))

            #train epoch loss
            ep_loss = batch_loss / batch_iters
            train_losses.append(ep_loss)

            #Val Loss
            val_recon = float(self.net.calc_loss(x_val))
            val_losses.append(val_recon)

            print('Epoch recon loss Train:{:.5f} Val:{:.5f}'.format(
                ep_loss, val_recon))

        recon_plot = plot_losses(train_losses, val_losses, 'Recon Loss')
        # recon_plot.set_title('Recon Loss')
        recon_plot.savefig('./plot/ae_recon_loss.png')
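
A minimal usage sketch for TrainAE, assuming SEED, AE, and plot_losses are defined elsewhere in the project; the array shapes and label placeholders below are illustrative only:

import numpy as np

data = {
    'x_train': np.random.rand(10000, 128).astype(np.float32),
    'y_train': np.zeros(10000, dtype=np.int64),  # not used by train(), kept for the expected keys
    'x_val':   np.random.rand(2000, 128).astype(np.float32),
    'y_val':   np.zeros(2000, dtype=np.int64),
}

trainer = TrainAE()
trainer.train(data, num_epochs=10, batch_size=32)
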
Code Example #7
    config.parse_commandline()
    config.new_experiment()

    X = Dataset()[config.CHANNEL]  # already segmented and overlapped!
    # hp = Hyperparameters()

    tscv = TimeSeriesSplit(n_splits=5)
    for train_index, test_index in tscv.split(X):
        print('train_index: {}'.format(train_index))
        print('test_index: {}'.format(test_index))

        ae = Model(
            AE(
                # architecture hyperparameters
                n_input=1,
                n_hidden=config.n_hidden,
                n_output=1,
                n_layers=config.n_layers,
            ))

        monitor = BaseMonitor(ae, temperature, learning_rate, weight_decay)

        X_train = X[train_index]
        X_train_zipped = torch.utils.data.DataLoader(
            ZipDataset(X_train[:, :-1], X_train[:, 1:]),
            batch_size=config.batch_size,
            shuffle=False)
        monitor.fit(X_train)

        X_test = X[test_index]
        X_test_zipped = torch.utils.data.DataLoader(
            ZipDataset(X_test[:, :-1], X_test[:, 1:]),
            batch_size=config.batch_size,
            shuffle=False)
Code Example #8
    args.type_mod = 'normal'
    encoder = EncoderCNNGRU(args)
    args.cnn_size = encoder.cnn_size
    decoder = DecoderCNNGRU(args)
elif args.encoder_type == 'cnn-gru-embed':
    args.type_mod = 'normal'
    encoder = EncoderCNNGRU(args)
    args.cnn_size = encoder.cnn_size
    decoder = DecoderCNNGRUEmbedded(args)
elif args.encoder_type == 'hierarchical':
    encoder = EncoderHierarchical(args)
    decoder = DecoderHierarchical(args)
print('[Creating model]')
# Then select different models
if args.model == 'ae':
    model = AE(encoder, decoder, args).float()
elif args.model == 'vae':
    model = VAE(encoder, decoder, args).float()
elif args.model == 'wae':
    model = WAE(encoder, decoder, args).float()
else:
    print("Oh no, unknown model " + args.model + ".\n")
    exit()
# Send model to the device
model.to(args.device)
# Initialize the model weights
print('[Initializing weights]')
if args.initialize:
    model.apply(init_classic)

# %%
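
init_classic is defined elsewhere in the project. A sketch of what a classic initializer applied via model.apply might look like, offered as an assumption and not the project's actual code:

import torch.nn as nn

def init_classic(module):
    # Xavier weights and zero biases for linear/convolutional layers (hypothetical)
    if isinstance(module, (nn.Linear, nn.Conv1d, nn.Conv2d)):
        nn.init.xavier_uniform_(module.weight)
        if module.bias is not None:
            nn.init.zeros_(module.bias)
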