def ENres(data_use="full"):
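    # Run the neural architecture search and hyperparameter search on the
    # 'NASp' data and write both result grids to CSV.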

    x, y, xt = utils.loaddata('NASp',
                              1,
                              dir="~/physics_guided_nn/data/",
                              raw=True)
    y = y.to_frame()

    if data_use == "sparse":
        x, y = utils.sparse(x, y)

    print("X", x, "Y", y)
    print('Length', len(x), len(y))

    splits = len(x.index.year.unique())

    x.index, y.index = np.arange(0, len(x)), np.arange(0, len(y))

    # original grid size: 800
    arch_grid = HP.ArchitectureSearchSpace(x.shape[1], y.shape[1], 8, 4)

    # architecture search
    layersizes, agrid = HP.ArchitectureSearch(arch_grid, {
        'epochs': 100,
        'batchsize': 8,
        'lr': 0.001
    },
                                              x,
                                              y,
                                              splits,
                                              "arSres",
                                              hp=True)

    agrid.to_csv(f"~/physics_guided_nn/results/NresAS_{data_use}.csv")

    # Hyperparameter Search Space
    # original search space size: 800
    hpar_grid = HP.HParSearchSpace(8)

    # Hyperparameter search
    hpars, grid = HP.HParSearch(layersizes,
                                hpar_grid,
                                x,
                                y,
                                splits,
                                "hpres",
                                hp=True)

    print('hyperparameters: ', hpars)

    grid.to_csv(f"~/physics_guided_nn/results/NresHP_{data_use}.csv")
def ENmlp(data_use="full", splits=None):
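    # Same search pipeline as ENres, but for the MLP on the 'NAS' data
    # (reduced grid of 5 and 10 epochs instead of the original 800/100).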

    x, y, xt = utils.loaddata('NAS',
                              1,
                              dir="~/physics_guided_nn/data/",
                              raw=True)
    y = y.to_frame()

    if data_use == "sparse":
        x, y = utils.sparse(x, y)

    if splits is None:
        splits = len(x.index.year.unique())

    print(splits)
    print(x, y)

    x.index, y.index = np.arange(0, len(x)), np.arange(0, len(y))

    arch_grid = HP.ArchitectureSearchSpace(x.shape[1], y.shape[1], 5, 4)

    # architecture search
    # original: use grid of 800 and epochs:100
    layersizes, argrid = HP.ArchitectureSearch(arch_grid, {
        'epochs': 10,
        'batchsize': 8,
        'lr': 0.001
    },
                                               x,
                                               y,
                                               splits,
                                               "arSmlp",
                                               hp=True)
    argrid.to_csv(f"~/physics_guided_nn/results/NmlpAS_{data_use}.csv")

    # Hyperparameter Search Space
    hpar_grid = HP.HParSearchSpace(5)

    # Hyperparameter search
    hpars, grid = HP.HParSearch(layersizes,
                                hpar_grid,
                                x,
                                y,
                                splits,
                                "hpmlp",
                                hp=True)

    print('hyperparameters: ', hpars)
    grid.to_csv(f"~/physics_guided_nn/results/NmlpHP_{data_use}.csv")
Example #3
def main(args):
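    # Load the data and the encoder/discriminator/classifier, run the training
    # loop, then evaluate on the test set and against a PGD attack.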
    use_cuda = torch.cuda.is_available()
    print('==> Loading data..')
    train_loader, test_loader = loaddata(args)

    print('==> Loading model..')
    encoder, discriminator, classifier = loadmodel(args)

    print('==> Training starts..')
    torch.manual_seed(123)
    classifier, encoder = train_er_classifier(train_loader,
                                              test_loader,
                                              encoder,
                                              discriminator,
                                              classifier,
                                              use_cuda=use_cuda,
                                              n_z=args['n_z'],
                                              sigma=args['sigma'],
                                              num_epoch=args['epochs'],
                                              lr=args['lr'],
                                              LAMBDA=args['LAMBDA'],
                                              LAMBDA0=args['LAMBDA0'],
                                              LAMBDA1=args['LAMBDA1'],
                                              delay=args['delay'],
                                              file_name=args['file_name'],
                                              epsilon=args['epsilon'],
                                              k=args['k'],
                                              a=args['a'],
                                              print_every=args['print_every'],
                                              dataset=args['dataset'],
                                              attack_range=args['range'])

    test(test_loader, classifier, encoder=encoder, use_cuda=use_cuda)
    print('==> Testing the model against PGD attack..')
    testattack(classifier,
               encoder,
               test_loader,
               epsilon=args['epsilon'],
               k=args['k'],
               a=args['a'],
               dataset=args['dataset'],
               use_cuda=use_cuda)
Example #4
# Calculator for loan maturity

# Imports
from datetime import datetime
import utils

#####
interests = utils.loaddata(utils.interests)
txs = utils.loaddata(utils.txs)
list_txs = utils.group(txs)
list_interests = utils.group(interests)
del interests, txs

list_txs = utils.reindex(list_txs)
list_interests = utils.reindex(list_interests)
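# Merge transaction group 8 into group 7 and reorder the remaining groups.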
list_txs[7] = list_txs[7].append(list_txs[8])
del list_txs[8]

list_txs.insert(6, list_txs[3])
del list_txs[3]

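# For every interest group, record the symbol and the mean COMP and APY values.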
symbols = []
comp_interest = []
apys = []
for i in list_interests:
    symbols.append(i['SYMBOL'][0])
    comp_interest.append(i['COMP'].mean())
    apys.append(i['APY'].mean())
####

Example #5
#!/usr/bin/env python
# coding: utf-8
import utils
import HP
import torch
import pandas as pd
import numpy as np
import random
import training

x, y, xt = utils.loaddata('NAS', 1, dir="./data/", raw=True)

ypreles = xt.drop(xt.columns.difference(['GPPp']), axis=1)[1:]
ypreles.index = x.index

train_x, train_y, train_yp = x[x.index.year.isin([2004, 2006])], y[y.index.year.isin([2004, 2006])].to_frame(), ypreles[ypreles.index.year.isin([2004, 2006])]
test_x, test_y, test_yp = x[x.index.year == 2008], y[y.index.year == 2008].to_frame(),  ypreles[ypreles.index.year == 2008]


train_x.index, train_y.index, train_yp.index = np.arange(0, len(train_x)), np.arange(0, len(train_y)), np.arange(0, len(train_yp))
test_x.index, test_y.index, test_yp.index = np.arange(0, len(test_x)), np.arange(0, len(test_y)), np.arange(0, len(test_yp))

print(train_x, train_y, train_yp)

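# Pick the layer sizes of the architecture with the lowest validation loss
# from the saved NAS results.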
res_as = pd.read_csv("NregAS.csv")
a = res_as.loc[res_as.val_loss.idxmin()][1:5]
b = a.to_numpy()
layersizes = list(b[np.isfinite(b)].astype(int))
print('layersizes', layersizes)

model_design = {'layersizes': layersizes}
Example #6
def main():
    parser = argparse.ArgumentParser(
        description='simple 3D convolution for action recognition')
    parser.add_argument('--data_dir', type=str, default="./data/")
    parser.add_argument('--batch', type=int, default=16)
    parser.add_argument('--epoch', type=int, default=10000)
    parser.add_argument('--color', type=bool, default=True)
    parser.add_argument('--skip', type=bool, default=True)
    parser.add_argument('--depth', type=int, default=32)
    parser.add_argument('--calculateEvaluationCCC',
                        type=str,
                        default="./data/calculateEvaluationCCC.py")
    parser.add_argument('--validationCSV',
                        type=str,
                        default="./data/omg_ValidationVideos.csv")
    parser.add_argument('--testCSV',
                        type=str,
                        default="./data/omg_TestVideos.csv")
    parser.add_argument('--trainCSV',
                        type=str,
                        default="./data/omg_TrainVideos.csv")

    args = parser.parse_args()
    timestamp = str(int(time.time()))
    args.out_dir = os.path.abspath(os.path.join("runs", timestamp))

    all_x = []
    all_y = []
    nb_classes = 2

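    # Build the video dataset for each split, reusing a cached .npz file
    # when one already exists.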
    for mode in ["Train", "Validation", "Test"]:
        img_rows, img_cols, frames = 32, 32, args.depth
        channel = 3 if args.color else 1
        fname_npz = os.path.join(args.data_dir,
                                 'dataset_{}_{}_{}_{}.npz').format(
                                     mode, nb_classes, args.depth, args.skip)
        vid3d = videoto3d.Videoto3D(img_rows, img_cols, frames)
        print(fname_npz)
        if os.path.exists(fname_npz):
            loadeddata = np.load(fname_npz)
            X, Y = loadeddata["X"], loadeddata["Y"]
            print("Dataset found already for mode ", mode)
        else:
            x, y = utils.loaddata("{}_videos".format(mode), vid3d, args, mode,
                                  args.color, args.skip)
            X = x.reshape((x.shape[0], img_rows, img_cols, frames, channel))
            X = X.astype('float32')
            Y = np.array(y)
            np.savez(fname_npz, X=X, Y=Y)
            print('Saved dataset to', fname_npz)
        all_x.append(X)
        all_y.append(Y)

    X_train, X_test, _ = all_x
    Y_train, Y_test, _ = all_y

    print('Train : X_shape:{}\nY_shape:{}'.format(X_train.shape,
                                                  Y_train.shape))
    print('Validation : X_shape:{}\nY_shape:{}'.format(X_test.shape,
                                                       Y_test.shape))

    # Define model
    model = Sequential()
    model.add(
        Conv3D(32,
               kernel_size=(5, 5, 5),
               input_shape=(X.shape[1:]),
               padding='same'))
    # model.add(Activation('relu'))
    # model.add(Conv3D(32, kernel_size=(3, 3, 3), padding='same'))
    model.add(Activation('softmax'))
    model.add(MaxPooling3D(pool_size=(4, 4, 4), padding='same'))
    model.add(Dropout(0.2))
    #
    model.add(Conv3D(32, kernel_size=(5, 5, 5), padding='same'))
    # model.add(Activation('relu'))
    # model.add(Conv3D(32, kernel_size=(3, 3, 3), padding='same'))
    model.add(Activation('softmax'))
    model.add(MaxPooling3D(pool_size=(3, 3, 3), padding='same'))

    model.add(Dropout(0.2))

    model.add(Flatten())

    model.add(Dense(128, activation='sigmoid'))

    model.add(Dropout(0.2))

    model.add(Dense(2, activation='linear'))

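    # Two-unit linear output trained as a regression with MSE loss.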
    model.compile(loss='mse', optimizer=Adam(lr=0.001), metrics=['mse'])
    model.summary()

    # for i, j in enumerate(model.layers):
    #     print(i,j)
    # sys.exit()

    if not os.path.isdir(args.out_dir):
        os.makedirs(args.out_dir)

    model_json = model.to_json()
    with open(os.path.join(args.out_dir, 'model.json'), 'w') as json_file:
        json_file.write(model_json)

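    # Checkpoint the weights every epoch and record validation predictions
    # through the prediction_history callback.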
    filepath = os.path.join(
        args.out_dir, "weights-improvement-{epoch:02d}-{val_loss:.2f}.hdf5")
    checkpoint = ModelCheckpoint(filepath)
    predictions = utils.prediction_history(X_test, model, args)
    callbacks_list = [checkpoint, predictions]

    history = model.fit(X_train,
                        Y_train,
                        validation_data=(X_test, Y_test),
                        batch_size=args.batch,
                        epochs=args.epoch,
                        verbose=1,
                        shuffle=True,
                        callbacks=callbacks_list)
Example #7
def evalDA(da):
    '''
    da specifies Domain Adaptation:
        da = 1: use the pretrained weights and fully retrain the network
        da = 2: retrain only the last layer
    '''

    # Load hyytiala
    x, y, xt = utils.loaddata('validation',
                              1,
                              dir="~/physics_guided_nn/data/",
                              raw=True)
    y = y.to_frame()

    print(x.index.year.unique())
    train_x = x[~x.index.year.isin([2008])]
    train_y = y[~y.index.year.isin([2008])]

    splits = len(train_x.index.year.unique())

    test_x = x[x.index.year == 2008]
    test_y = y[y.index.year == 2008]
    train_x.index, train_y.index = np.arange(0, len(train_x)), np.arange(
        0, len(train_y))
    test_x.index, test_y.index = np.arange(0, len(test_x)), np.arange(
        0, len(test_y))
    print('XY: ', x, y)

    # Load results from NAS
    # Architecture
    res_as = pd.read_csv("~/physics_guided_nn/results/NmlpAS.csv")
    a = res_as.loc[res_as.val_loss.idxmin()][1:5]
    b = a.to_numpy()
    layersizes = list(b[np.isfinite(b)].astype(int))
    print('layersizes', layersizes)

    model_design = {'layersizes': layersizes}

    # Hyperparameters
    res_hp = pd.read_csv("~/physics_guided_nn/results/NmlpHP.csv")
    a = res_hp.loc[res_hp.val_loss.idxmin()][1:3]
    b = a.to_numpy()
    bs = b[1]

    # Learningrate
    res_hp = pd.read_csv("~/physics_guided_nn/results/mlp_lr.csv")
    a = res_hp.loc[res_hp.val_loss.idxmin()][1:3]
    b = a.to_numpy()
    lr = b[0]

    # originally 5000 epochs
    hp = {'epochs': 10, 'batchsize': int(bs), 'lr': lr}
    print('HYPERPARAMETERS', hp)
    data_dir = "/home/fr/fr_fr/fr_mw1205/physics_guided_nn/data/"
    data = "mlpDA_pretrained"

    tloss = training.train_cv(hp,
                              model_design,
                              train_x,
                              train_y,
                              data_dir,
                              splits,
                              data,
                              domain_adaptation=da,
                              reg=None,
                              emb=False,
                              hp=False)
    print(tloss)
    train_loss = tloss['train_loss']
    val_loss = tloss['val_loss']
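    # Collect the per-fold loss curves (5 folds, 10 epochs each) into columns
    # f1..f5 for the CSVs written below.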
    t1 = []
    t2 = []
    t3 = []
    t4 = []
    t5 = []
    # t6 = []
    # originally use 5000 epochs!
    for i in range(10):
        t1.append(train_loss[0][i])
        t2.append(train_loss[1][i])
        t3.append(train_loss[2][i])
        t4.append(train_loss[3][i])
        t5.append(train_loss[4][i])
        # t6.append(train_loss[5][i])
    v1 = []
    v2 = []
    v3 = []
    v4 = []
    v5 = []
    # v6 = []
    for i in range(10):
        v1.append(val_loss[0][i])
        v2.append(val_loss[1][i])
        v3.append(val_loss[2][i])
        v4.append(val_loss[3][i])
        v5.append(val_loss[4][i])
        # v6.append(val_loss[5][i])

    pd.DataFrame({
        "f1": v1,
        "f2": v2,
        "f3": v3,
        "f4": v4,
        "f5": v5
    }).to_csv(f'~/physics_guided_nn/results/mlpDA{da}_vloss.csv')
    #tloss = training.train(hp, model_design, train_x, train_y, data_dir, None, data, reg=None, emb=False)
    #tloss = cv.train(hp, model_design, train_x, train_y, data_dir=data_dir, data=data, splits=splits)
    #print("LOSS", tloss)
    pd.DataFrame({
        "f1": t1,
        "f2": t2,
        "f3": t3,
        "f4": t4,
        "f5": t5
    }).to_csv(f'~/physics_guided_nn/results/mlpDA{da}_trainloss.csv')

    # Evaluation
    mse = nn.MSELoss()
    mae = nn.L1Loss()
    x_train, y_train = torch.tensor(train_x.to_numpy(),
                                    dtype=torch.float32), torch.tensor(
                                        train_y.to_numpy(),
                                        dtype=torch.float32)
    x_test, y_test = torch.tensor(test_x.to_numpy(),
                                  dtype=torch.float32), torch.tensor(
                                      test_y.to_numpy(), dtype=torch.float32)

    train_rmse = []
    train_mae = []
    test_rmse = []
    test_mae = []

    preds_train = {}
    preds_test = {}

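    # Reload the model saved for each CV fold and evaluate it on the training
    # years and the held-out test year (2008).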
    for i in range(splits):
        i += 1
        # load the trained model for this fold
        model = models.NMLP(x.shape[1], y.shape[1], model_design['layersizes'])
        model.load_state_dict(
            torch.load(''.join((data_dir, f"mlpDA{da}_model{i}.pth"))))
        model.eval()
        with torch.no_grad():
            p_train = model(x_train)
            p_test = model(x_test)
            preds_train.update({f'train_mlp{i}': p_train.flatten().numpy()})
            preds_test.update({f'test_mlp{i}': p_test.flatten().numpy()})
            train_rmse.append(mse(p_train, y_train).tolist())
            train_mae.append(mae(p_train, y_train).tolist())
            test_rmse.append(mse(p_test, y_test).tolist())
            test_mae.append(mae(p_test, y_test).tolist())

        performance = {
            'train_RMSE': train_rmse,
            'train_MAE': train_mae,
            'test_RMSE': test_rmse,
            'test_mae': test_mae
        }

        print(preds_train)

    pd.DataFrame.from_dict(performance).to_csv(
        f'~/physics_guided_nn/results/mlpDA{da}_eval_performance.csv')
    pd.DataFrame.from_dict(preds_train).to_csv(
        f'~/physics_guided_nn/results/mlpDA{da}_eval_preds_train.csv')
    pd.DataFrame.from_dict(preds_test).to_csv(
        f'~/physics_guided_nn/results/mlpDA{da}_eval_preds_test.csv')
Example #8
import utils
import models
import torch.nn as nn
import torch.optim as optim
from sklearn import metrics
from sklearn.model_selection import train_test_split
import random
import os
from torch.utils.data import TensorDataset, DataLoader
from torch import Tensor
import csv
import training

## Load data for pretraining

x, y, r = utils.loaddata('simulations', 1, dir="~/physics_guided_nn/data/")
y = y.to_frame()

## Split into training and test

train_x, test_x, train_y, test_y = train_test_split(x, y)
print(train_x.shape)
print(test_x.shape)
print(train_y.shape)
print(test_y.shape)
print(type(train_x))
print(type(train_y))

## Pretraining in n-fold CV: choose n the same as in evalmlpDA.py
splits = 6
Example #9
#!/usr/bin/env python
# coding: utf-8
import utils
import HP
import torch
import pandas as pd
import numpy as np
import random
import training

x, y, xt = utils.loaddata('exp2p', 1, dir="./data/", raw=True)

# select NAS data
print(x.index)

x = x[x.index.year == 2004]
y = y[y.index.year == 2004]

x = x.drop(pd.DatetimeIndex(['2004-01-01']))
y = y.drop(pd.DatetimeIndex(['2004-01-01']))
print(x, y)
splits = 8

x.index = np.arange(0, len(x))
y.index = np.arange(0, len(y))

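# The first 7/8 of the (reindexed) days go to training, the final 1/8 to testing.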
train_idx = np.arange(0, np.ceil(len(x) / splits) * 7)
test_idx = np.arange(np.ceil(len(x) / splits) * 7, len(x))

train_x, train_y = x[x.index.isin(train_idx)], y[y.index.isin(train_idx)]
test_x, test_y = x[x.index.isin(test_idx)], y[y.index.isin(test_idx)]
Example #10
#!/usr/bin/env python
# coding: utf-8
import utils
import HP
import trainloaded
import torch
import pandas as pd
import numpy as np

x, y, mn, std, xt = utils.loaddata('NAS', 0, dir="./data/", raw=True)
xt = xt.drop(['date', 'GPP', 'ET'], axis=1)

yp_tr = pd.read_csv("./data/train_soro.csv")
yp_te = pd.read_csv("./data/test_soro.csv")
yp_tr.index = pd.DatetimeIndex(yp_tr['date'])
yp_te.index = pd.DatetimeIndex(yp_te['date'])
yptr = yp_tr.drop(yp_tr.columns.difference(['GPPp']), axis=1)
ypte = yp_te.drop(yp_te.columns.difference(['GPPp']), axis=1)

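# Concatenate the predicted GPP (GPPp) for the train and test periods and
# standardise it with the GPP mean and standard deviation.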
yp = pd.concat([yptr, ypte])

ypp = (yp - mn['GPP']) / std['GPP']

splits = len(x.index.year.unique())

x.index, y.index, ypp.index = np.arange(0, len(x)), np.arange(
    0, len(y)), np.arange(0, len(ypp))

arch_grid = HP.ArchitectureSearchSpace(x.shape[1],
                                       y.shape[1],
Example #11
def gen_simulations(n, data_dir='~/physics_guided_nn/data/'):

    x, y, xt = utils.loaddata('validation',
                              None,
                              dir="~/physics_guided_nn/data/",
                              raw=True,
                              doy=False)
    y = y.to_frame()

    # Hold out a year as test data
    train_x = x[~x.index.year.isin([2012])]
    train_y = y[~y.index.year.isin([2012])]

    print(train_x)

    train_x['year'] = pd.DatetimeIndex(train_x['date']).year
    train_x = train_x.drop(['date'], axis=1)

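    # Fit one cyclic GAM per climate driver (Tair, Precip, VPD, PAR, fapar)
    # as a function of DOY and year, and pickle each fitted model.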
    gamTair = GAM(s(0, by=1, n_splines=200,
                    basis='cp')).fit(train_x[['DOY', 'year']], train_x['Tair'])
    with open('/home/fr/fr_fr/fr_mw1205/physics_guided_nn/results/gamTair',
              'wb') as f:
        pickle.dump(gamTair, f)
    gamPrecip = GAM(s(0, by=1, n_splines=200,
                      basis='cp')).fit(train_x[['DOY', 'year']],
                                       train_x['Precip'])
    with open('/home/fr/fr_fr/fr_mw1205/physics_guided_nn/results/gamPrecip',
              'wb') as f:
        pickle.dump(gamPrecip, f)
    gamVPD = GAM(s(0, by=1, n_splines=200,
                   basis='cp')).fit(train_x[['DOY', 'year']], train_x['VPD'])
    with open('/home/fr/fr_fr/fr_mw1205/physics_guided_nn/results/gamVPD',
              'wb') as f:
        pickle.dump(gamVPD, f)
    gamPAR = GAM(s(0, by=1, n_splines=200,
                   basis='cp')).fit(train_x[['DOY', 'year']], train_x['PAR'])
    with open('/home/fr/fr_fr/fr_mw1205/physics_guided_nn/results/gamPAR',
              'wb') as f:
        pickle.dump(gamPAR, f)
    gamfapar = GAM(s(0, by=1, n_splines=200,
                     basis='cp')).fit(train_x[['DOY', 'year']],
                                      train_x['fapar'])
    with open('/home/fr/fr_fr/fr_mw1205/physics_guided_nn/results/gamfapar',
              'wb') as f:
        pickle.dump(gamfapar, f)

    p = parameter_samples(n_samples=n)
    #np.savetext('parameter_simulations.csv', p, delimiter=';')
    pt = torch.tensor(p, dtype=torch.float64)

    d = []

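    # For every parameter sample, generate a simulated climate series and run
    # the physical forward model to obtain the corresponding GPP.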
    for i in range(n):
        c = climate_simulations(train_x)
        #np.savetext('climate_simulations.csv', c.to_numpy(), delimiter=';')
        ct = torch.tensor(c.to_numpy(), dtype=torch.float64)

        out = models.physical_forward(parameters=pt[i, :], input_data=ct)
        out = out.detach().numpy()
        #np.savetext('gpp_simulations.csv')

        c['GPP'] = out
        d.append(c)

    d = pd.concat(d)
    d.to_csv(''.join((data_dir, 'DA_preles_sims.csv')), index=False)
Exemple #12
0
    def loaddata(self):
        # first get all the tree data
        self.data = utils.loaddata(self.name)
Example #13
#!/usr/bin/env python
# coding: utf-8
import utils
import HP
import training
import torch
import pandas as pd
import numpy as np

x, y, xt = utils.loaddata('NAS', 0, dir="./data/", raw=True)

xt = xt.drop(['date', 'GPP', 'ET', 'GPPp', 'ETp', 'SWp', 'Unnamed: 0'], axis=1)
print(xt)

l, m, yp = utils.loaddata('NAS', 0, dir="./data/", raw=True)

yp = yp.drop(yp.columns.difference(['GPPp']), axis=1)
print(yp)
splits = len(x.index.year.unique())
x.index, y.index, yp.index = np.arange(0, len(x)), np.arange(
    0, len(y)), np.arange(0, len(yp))

y = y.to_frame()

arch_grid = HP.ArchitectureSearchSpace(x.shape[1],
                                       y.shape[1],
                                       140,
                                       4,
                                       emb=True)
Example #14
import numpy as np
import utils
import models
import torch.nn as nn
import torch.optim as optim
from sklearn import metrics
from sklearn.model_selection import train_test_split
import random
import os
from torch.utils.data import TensorDataset, DataLoader
from torch import Tensor
import csv
import training

# Load hyytiala
x, y, xt = utils.loaddata('validation', 1, dir="./data/", raw=True)
y = y.to_frame()

print(x.index.year.unique())
train_x = x[~x.index.year.isin([2007, 2008])]
train_y = y[~y.index.year.isin([2007, 2008])]

splits = len(train_x.index.year.unique())

test_x = x[x.index.year == 2008]
test_y = y[y.index.year == 2008]
train_x.index, train_y.index = np.arange(0, len(train_x)), np.arange(
    0, len(train_y))
test_x.index, test_y.index = np.arange(0,
                                       len(test_x)), np.arange(0, len(test_y))
print('XY: ', x, y)
Example #15
import pandas as pd
import numpy as np
import utils
import models
import torch.nn as nn
import torch.optim as optim
from sklearn import metrics
from sklearn.model_selection import train_test_split
import random
import os
from torch.utils.data import TensorDataset, DataLoader
from torch import Tensor
import csv
import training

x, y, xt = utils.loaddata('OF', 0, dir="./data/", raw=True)
print('n', x, y, xt)
yp = pd.read_csv("./data/train_soro.csv")
yp.index = pd.DatetimeIndex(yp['date'])

xt.index = pd.DatetimeIndex(xt.date)

SW = np.concatenate((yp.SWp.values, yp.SWp.values))

swmn = np.mean(SW)
swstd = np.std(SW)
rtr = xt[xt.index.year == 2002]
rte = xt[xt.index.year == 2003]

ypg = yp.drop(yp.columns.difference(['GPPp']), axis=1)
rtr = rtr.drop(['date', 'ET', 'GPP'], axis=1)
Example #16
#!/usr/bin/env python
# coding: utf-8
import utils
import HP
import torch
import pandas as pd
import numpy as np

x, y, xt, yp = utils.loaddata('exp2', 1, dir="./data/", raw=True)
x = x.drop(pd.DatetimeIndex(['2004-01-01']))
y = y.drop(pd.DatetimeIndex(['2004-01-01']))
yp = yp.drop(pd.DatetimeIndex(['2004-01-01']))
yp = yp.drop(yp.columns.difference(['GPPp']), axis=1)

yp = yp[yp.index.year == 2004]
x = x[x.index.year == 2004]
y = y[y.index.year == 2004]
y = y.to_frame()


print(x, y, yp)

#print(len(x), len(y))
splits = 8
x.index, y.index, yp.index = np.arange(0, len(x)), np.arange(0, len(y)), np.arange(0, len(yp))

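# Architecture search on the full 800-point grid, passing the predicted GPP
# (ypreles=yp) for the res=2 setup.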
arch_grid = HP.ArchitectureSearchSpace(x.shape[1], y.shape[1], 800, 4)

# architecture search
layersizes, ag = HP.ArchitectureSearch(arch_grid, {'epochs': 100, 'batchsize': 8, 'lr': 0.001}, x, y, splits, "EX2_arSres2", exp=2, hp=True, res=2, ypreles=yp)
ag.to_csv("./EX2_res2AS.csv")
Example #17
            else:
                sysrun('rhythmbox-client --play-uri=\"' + songlocation +
                       "\" &")
                print("----------------Playing  " + matches[0] +
                      "--------------------")
                STARTED = True
        except IndexError:
            print("\t\t\t!!No match for " + title + " :(!!")
            continue


# main
sysrun('rhythmbox-client --clear-queue &')
sysrun('rhythmbox-client --stop')

data = loaddata('./objects/df.obj')
songlist = data['song'].tolist()

with open(PLAYLISTFILEPATH) as music:
    try:
        playlists = load_json(music.read())
    except JSONDecodeError:
        print("could not read file")
        sysrun("sleep 10")

genres = list(playlists.keys())

options = DataFrame({"playlist": genres})

newqueue = playlists.get("newqueue", [])