Example #1
import torch  # not shown in the original excerpt

# Run on the GPU if one is available, otherwise fall back to the CPU
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'

device = torch.device(DEVICE)

set_seed_everywhere(SEED)

# %% Data

params['t_span'] = (params['t_min'], params['t_max'])
params['p_span'] = (params['p_min'], params['p_max'])
data = create_dataset(params)
train, trainc, test = init_dataset(data, params, transformation=None)

# %% Train

model_nn = NN(nn_params)

results_NN = train_NN(model_nn,
                      train,
                      test,
                      nn_params,
                      params,
                      args,
                      noise=0.01)

# model_pinn = PINN(nn_params, params)
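
set_seed_everywhere is not defined in this excerpt; a minimal sketch of such a helper, assuming the usual RNGs are in play (this is not the project's actual implementation):

import random

import numpy as np
import torch


def set_seed_everywhere(seed):
    # Hypothetical helper: seed Python, NumPy and PyTorch so runs repeat
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)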
Example #2
             np.mean(mse_u_loss),
             np.mean(mse_f_loss)])

    # Save models and data
    torch.save(model.state_dict(),
               PATH_MODELS + NAME + f'_{n_epochs}_{n_data}_{n_coll}.pth')
    np.savez(PATH_DATA + NAME + f'_{n_epochs}_{batch_size}_{n_data}_{n_coll}',
             train_loss=train_loss)

    return train_loss


#%% Testing

if __name__ == '__main__':

    args, general, params, nn_params = cli()
    params['t_span'] = (params['t_min'], params['t_max'])
    params['p_span'] = (params['p_min'], params['p_max'])

    data = create_dataset(params)
    train, trainc, test = init_dataset(data, params)

    #model_nn = NN(nn_params)

    #results_nn = train_NN(model_nn, train, test, nn_params, data_params)

    model_pinn = PINN(nn_params, params)

    results_pinn = train_PINN(model_pinn, trainc, test, nn_params, params)
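
The artifacts written by torch.save and np.savez in the training function above can be read back as follows, assuming NAME, PATH_MODELS, PATH_DATA and the hyperparameter variables are still in scope (note that np.savez appends the .npz suffix itself):

model.load_state_dict(
    torch.load(PATH_MODELS + NAME + f'_{n_epochs}_{n_data}_{n_coll}.pth'))
train_loss = np.load(
    PATH_DATA + NAME + f'_{n_epochs}_{batch_size}_{n_data}_{n_coll}.npz')['train_loss']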
Example #3
import argparse
import numpy as np
import tensorflow as tf

from model import ANN
import data

parser = argparse.ArgumentParser(description='Visualize ANN')
parser.add_argument('-d',
                    '--dataset',
                    type=str,
                    default='mnist',
                    choices=data.get_names())
parser.add_argument('--num_iter', type=int, default=5000)
args = parser.parse_args()

dataset = data.init_dataset(name=args.dataset)

model = ANN(dataset.shape)

model.train(dataset.tr_data, dataset.tr_labels, num_iter=args.num_iter)
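
Assuming the script above is saved as visualize_ann.py (a hypothetical filename), it would be run along these lines:

python visualize_ann.py --dataset mnist --num_iter 10000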
Example #4
import torch  # used by the tensors built in the BNN cell below

if __name__ == '__main__':

    DATA_DIR = 'data/'

    from cli import cli
    from data import create_dataset, init_dataset

    args, general, params, nn_params = cli()
    params['t_span'] = (params['t_min'], params['t_max'])
    params['p_span'] = (params['p_min'], params['p_max'])
    n_data = params['n_data']
    n_coll = params['n_collocation']

    data = create_dataset(params)
    X_u, X_f, y_delta, y_omega = data
    train, trainc, test = init_dataset(data, params)
    train_idx, trainc_idx, test_idx = init_dataset(data,
                                                   params,
                                                   sample=False,
                                                   transformation=None)
    X_train, y_delta_train, y_omega_train, trf_params_train = train
    X_test, y_delta_test, y_omega_test, trf_params_test = test
    X_train_idx, y_delta_train_idx, y_omega_train_idx, trf_params_train_idx = train_idx
    X_test_idx, y_delta_test_idx, y_omega_test_idx, trf_params_test_idx = test_idx

#%% Data for BNN

idx = 0
X_selected = torch.tensor(X_train_idx[idx * n_data:idx * n_data + n_data, :2],
                          dtype=torch.float32)
# (the source is truncated here; the slice is completed to mirror X_selected)
y_selected = torch.tensor(y_delta_train_idx[idx * n_data:idx * n_data + n_data],
                          dtype=torch.float32)
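
A minimal sketch of how these tensors could feed a training loop; the DataLoader wrapping is an assumption, not shown in the source:

from torch.utils.data import DataLoader, TensorDataset

# Hypothetical: batch the selected slice for the BNN
loader = DataLoader(TensorDataset(X_selected, y_selected),
                    batch_size=32, shuffle=True)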
Example #5
import numpy as np
import tensorflow as tf

import data    # project-local modules used below
import models

dataset_path = '../dataset/train2017'
style_image_path = '../style/kandinsky.jpg'
content_image_path = '../content/tubingen.jpg'
output_path = '../output/'

batch_size = 1
batch_shape = (batch_size, 256, 256, 3)

content_loss_weight = 1e0
style_loss_weight = 1e3
tv_loss_weight = 2e2

learning_rate = 1
epochs = 2

data.init_dataset(dataset_path, batch_shape)

content_img = data.get_img(content_image_path)
content_img = data.img_fit_to(content_img)

content_input = np.expand_dims(content_img, axis=0)
content_input = models.vgg_preprocess(content_input)
content_input_vgg = tf.constant(content_input, dtype=tf.float32, name='content_input')
style_input = np.expand_dims(data.get_img(style_image_path), axis=0)
style_input = models.vgg_preprocess(style_input)
style_input_vgg = tf.Variable(style_input, dtype=tf.float32, name='style_input')

#mixer_net = models.load_mixer_net(batch_input)
#image_input = mixer_net
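
The three weights above presumably enter the standard neural-style objective; a sketch, where the individual *_loss tensors are assumptions computed elsewhere:

# Hypothetical assembly of the total objective from the weights defined above
total_loss = (content_loss_weight * content_loss
              + style_loss_weight * style_loss
              + tv_loss_weight * tv_loss)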
Example #6
            },
            resolver=lambda root, args, *_: save_obj_detect_image(
                args.get('id'), args.get('project'), args.get('annotations'))
        ),
        'allPost': GraphQLField(
            ObjAllPostType,
            args={
                'name': GraphQLArgument(GraphQLString),
                'description': GraphQLArgument(GraphQLString),
                'tissue': GraphQLArgument(GraphQLString),
                'dataset': GraphQLArgument(GraphQLString)
            },
            resolver=lambda root, args, *_: save_model_post(
                args.get('name'),
                args.get('description'),
                args.get('tissue'),
                args.get('dataset'),
            ),
        )
    }
)

Schema = GraphQLSchema(QueryRootType, MutationRootType)


# Init test project
fold_fpath = data.get_fpath(cfg.PROJECT_NAME, cfg.FOLD_FNAME)
if not os.path.exists(fold_fpath):
    _ = data.init_dataset(cfg.PROJECT_NAME, cfg.MEDIA_PATH, 
                          cfg.IMG_EXT, cfg.PROJECT_LABELS)
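
A quick sanity check for the schema built above, assuming graphql-core 2.x (which the resolver= keyword implies); introspection is valid against any schema, so no project-specific fields are assumed:

from graphql import graphql

result = graphql(Schema, '{ __schema { queryType { name } } }')
print(result.data, result.errors)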
Example #7
import os
import sys

import numpy as np

import config

os.chdir('src/')  # fix for data.init_dataset()
np.random.seed(config.seed)

import data
import tfidf
import models
import sentimentanalysis
from utils import utils, io

# info = pandas.read_csv(config.dataset_dir + 'final_data.csv')
dataset = data.init_dataset()

# load model
m = config.dataset_dir + 'models/default_model.json'
w = config.dataset_dir + 'models/default_model_w.h5'
model = models.load_model(m, w)

if __name__ == '__main__':
    args = sys.argv
    if len(args) > 1:
        filename = '../' + args[1]
    else:
        filename = config.dataset_dir + '1118.txt'

    print('\n filename:', filename)
    tokens, lines = io.read_book3(filename)

    # build feature vector
    v1 = data.tokenlist_to_vector(tokens, dataset.sentiment_dataset)
    v2 = np.array(sentimentanalysis.per_book(lines))
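
An invocation sketch (the script filename is an assumption; the book path is resolved relative to the starting directory, and config.dataset_dir + '1118.txt' is the fallback when no argument is given):

python analyze_book.py 1118.txt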