Example 1
xTest = processFeatureFile(TEST_FEATURES_FILE)

# Ensure no features are 0.0
xTrain = addE(xTrain)
xDev = addE(xDev)
xTest = addE(xTest)

features = [4]
hidden_layers = [2]

xTrain = reduceFeatures(xTrain, features)
xDev = reduceFeatures(xDev, features)
xTest = reduceFeatures(xTest, features)

testing = True

if testing:
    predictions = runNN(xTrain, yTrain, xTest, hidden_layers=hidden_layers)
else:
    predictions = runNN(xTrain, yTrain, xDev, hidden_layers=hidden_layers)
    evaluate(yDev, predictions)

writeToFile(predictions)

totalEnd = timer()
print("Features: {}".format(features))
print("Hidden Layers: {}".format(hidden_layers))
print("Total elapsed time: {:.2f} secs".format(totalEnd - totalStart))

################################################################################
input_size = (513, 14)

q_size = 120
k = 10
# output_size=5
in_channels = 513
lr = 0.01
train_dataloaders = Yahoo('train', dir='A3Benchmark', norm=True,
                          q_size=120, batch_size=128, ratio=0.7)
test_dataloaders = Yahoo('test', dir='A3Benchmark', norm=True,
                         q_size=120, batch_size=128, ratio=0.7)
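# STFT windowing parameters: segment length and overlap between consecutive
# segments (the same names scipy.signal.stft uses); presumably applied when the
# raw series are converted into spectrogram inputs for the ConvAE.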
nperseg = 10
noverlap = 2

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
for id, (train_dataloader, test_dataloader) in enumerate(zip(train_dataloaders, test_dataloaders)):
    valid = 0
    evaluator = evaluate('yahoo')
    net = ConvAE(k, in_channels).to(device)
    optimizer = optim.Adam(net.parameters(), lr=lr)
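    # StepLR multiplies the learning rate by gamma every step_size epochs
    # (0.01 -> 0.0075 after 10 epochs -> 0.005625 after 20); note that PyTorch
    # >= 1.1 expects scheduler.step() to be called after optimizer.step().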
    scheduler = StepLR(optimizer, step_size=10, gamma=0.75)
    converger = converge()
    file_id = train_dataloaders.files[train_dataloaders.idx]
    cnt = 0
    start_time = time.time()
    break_point = False
    # if id == 0:
    #     print('id 0 is skipped')
    #     continue
    for e in range(500):
        scheduler.step()
        if not break_point:
            valid = 0
Example 3
                         dir='A3Benchmark',
                         norm=True,
                         q_size=q_size,
                         batch_size=batch_size,
                         ratio=0.7)
data_type = 'yahoo'

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

net = WAE(input_n, hidden_n, k, output_size, device, clipping).to(device)
net.apply(weight_init)
optimizer = optim.Adam(net.parameters(), lr=lr)
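
# sampling() is defined elsewhere in this project; the definition below is only
# a minimal reparameterisation-style sketch matching the two ways it is called
# in the loop below (mu + sigma, or sigma + size only), not the original helper.
def sampling(mu=None, sigma=None, size=None):
    # Draw eps ~ N(0, I) and return mu + sigma * eps (mu defaults to 0).
    if size is None:
        size = sigma.size()
    eps = torch.randn(size, device=sigma.device)
    return (mu if mu is not None else 0) + sigma * eps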

# criterion = nn.MSELoss(reduction='mean')
for i, train_loader in enumerate(train_dataloaders):
    evaluator = evaluate(data_type)
    for e in range(epoch):
        valid = 0
        for b, data in enumerate(train_loader):
            # batch_n = len(train_loader)
            x, y = data['value'].to(device), data['label'].to(device)
            x, y = Variable(x), Variable(y)

            optimizer.zero_grad()

            x_mu, x_logvar, z_mean_, z_logvar_ = net(x)
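            # Reparameterisation: std = exp(0.5 * logvar) (equivalently
            # sqrt(exp(logvar))). Note that .exp_() below mutates z_logvar_ in
            # place, which matters if the log-variance is reused later (e.g. for
            # a KL/regularisation term).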
            x_std = x_logvar.mul(0.5).exp_()
            recon_x = sampling(mu=x_mu, sigma=x_std)
            z_std_ = z_logvar_.exp_().sqrt()

            z = sampling(sigma=z_std_, size=z_mean_.size())