def test_init_cond():
    params = Data()
    params.delta_x = 0.1 * np.ones(50)
    params.analytical = wave_forms.square
    controller = Controller(params)
    assert controller.init[0] == 0.0
    assert controller.init[-1] == 0.0

def test_mesh_initialize():
    params = Data()
    params.delta_x = np.array([0.5, 1.0, 0.1, 2.0])
    controller = Controller(params)
    correct = np.array([0.25, 1.0, 1.55, 2.6])
    assert controller.mesh.domain_width == 3.6
    assert len(controller.mesh.x) == 4
    assert (controller.mesh.x == correct).all()

def test_conv_test():
    my_params = Data()
    my_params.plotter = Data()
    my_params.plotter.always_plot = False
    my_params.plotter.never_plot = True
    my_params.plotter.plot_interval = 0.5
    my_params.t_max = 50.0
    # my_params.analytical = lambda x: wave_forms.sin_4(x, 2 * np.pi * 1.0)
    my_params.analytical = lambda x: wave_forms.gaussian(x, 4.0, 2.5)
    ct = ConvergenceTest(my_params, 5.0)

def main():
    data_path = os.path.join(CONFIGS['data']['dir'], CONFIGS['data']['filename'])
    data = Data(data_path, split=[0.7, 0.15, 0.15])
    data.preprocess()
    data.vectorize()
    trainloader = DataLoader(data.train(), batch_size=128)
    trainloader2 = DataLoader(data.train(), batch_size=1)
    devloader = DataLoader(data.valid(), batch_size=1)
    vae = VAE(data.input_dim_(), 256, 128, device).to(device)
    vae.train(trainloader, trainloader2, devloader)

def main():
    parser = argparse.ArgumentParser(prog='G+RSS.Poller')
    parser.add_argument('--redis_port', default=6379, type=int)
    parser.add_argument('--redis_host', default='127.0.0.1')
    parser.add_argument('--redis_db', default=0, type=int)
    parser.add_argument('--log_path', required=True)
    parser.add_argument('--config_path', required=True)
    parser.add_argument('--max_results', default=4, type=int)
    parser.add_argument('--name', required=True)
    parser.add_argument('--period', default=900, type=int)
    args = parser.parse_args()

    logging.basicConfig(format='%(asctime)s %(message)s',
                        datefmt='%m/%d/%Y %H:%M:%S')
    logger = logging.getLogger(__name__)
    logger.addHandler(
        config.getLogHandler(os.path.join(args.log_path, 'poller_test.log')))
    logger.setLevel(logging.DEBUG)

    data = Data(logger, args.redis_host, args.redis_port, args.redis_db)
    picasa = Picasa(logger, args.config_path)
    album = picasa.get_album('113347540216001053968', '5963913461943227665')
    print(album)

def main():
    parser = argparse.ArgumentParser(prog='G+RSS.Poller')
    parser.add_argument('--redis_port', default=6379, type=int)
    parser.add_argument('--redis_host', default='127.0.0.1')
    parser.add_argument('--redis_db', default=0, type=int)
    parser.add_argument('--log_path', required=True)
    parser.add_argument('--config_path', required=True)
    parser.add_argument('--max_results', default=4, type=int)
    parser.add_argument('--name', required=True)
    parser.add_argument('--period', default=900, type=int)
    args = parser.parse_args()

    logging.basicConfig(format='%(asctime)s %(message)s',
                        datefmt='%m/%d/%Y %H:%M:%S')
    logger = logging.getLogger(__name__)
    logger.addHandler(
        config.getLogHandler(os.path.join(args.log_path, 'poller_test.log')))
    logger.setLevel(logging.DEBUG)

    data = Data(logger, args.redis_host, args.redis_port, args.redis_db)
    providers = {'google': GooglePollMock(logger, data)}
    pol = Poller(logger=logger, name=args.name, data=data, providers=providers)
    pol.poll(args.period)

def __init__(self):
    self.data, self.label = Data().generate_data()
    self.model = LinearModel()
    self.learning_rate = cfg.LEARNING_RATE
    self.max_step = cfg.MAX_STEP
    self.stop_condition = cfg.STOP_CONDITION
    self.global_step = cfg.GLOBAL_STEP
    self.cost = []
    self.weights = cfg.INIT_WEIGHTS

def __init__(self):
    self.data = Data().generate_data()
    self.model = LinearModel()
    self.num_data = cfg.NUM_DATA
    self.learning_rate = cfg.LEARNING_RATE
    self.max_step = cfg.MAX_STEP
    self.weights = cfg.INIT_W
    self.cost = []
    self.stop_condition = cfg.STOP_CONDITION
    self.global_step = 0

def calculate_predictions(experiment_name, dataset):
    print('Loading data... ')
    sys.stdout.flush()
    if experiment_name in ['danq', 'deepsea', 'danqjaspar']:
        data = Data(data_suffix='_full')
    else:
        data = get_data_loader(experiment_name)
    X, y = data.get_data(dataset)

    print('Loading model... ')
    sys.stdout.flush()
    model = get_trained_model(experiment_name)

    print('Calculating predictions... ')
    sys.stdout.flush()
    make_predictions(
        model, X,
        join(RESULT_DIR, 'predictions-best',
             '{}-{}{}.npy'.format(experiment_name, dataset, data.suffix)),
        verbose=1)

def save_experiment_design(model_class, experiment_name, model_args={},
                           data_args={}, train_settings={},
                           callback_settings={}, check_model=False,
                           check_data=False):
    to_save = {'model_class': model_class,
               'model_args': model_args,
               'data_args': data_args,
               'train_settings': train_settings,
               'callback_settings': callback_settings}
    if check_model:
        model_class(**model_args).get_compiled_model()
    if check_data:
        Data(**data_args)
    os.makedirs('experiment_settings', exist_ok=True)
    pickle.dump(to_save,
                open('experiment_settings/{}.p'.format(experiment_name), 'wb'))

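# Hypothetical usage sketch (names and argument values below are placeholders,
# not from this repo): the settings pickled here are read back by
# get_data_loader further down to rebuild the Data object with identical
# arguments.
#
#   save_experiment_design(SomeModel, 'my_experiment',
#                          model_args={'hidden_units': 128},
#                          data_args={'data_suffix': '_full'},
#                          check_data=True)
#   data = get_data_loader('my_experiment')  # equivalent to Data(data_suffix='_full')
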
def test_error_tracker():
    delta_x = 0.01 * np.ones(100)
    exact = lambda t: np.linspace(0, 1, 100) + t
    params = Data()
    params.error_norm = 1
    params.plotter = Data()
    params.plotter.never_plot = True
    e1 = ErrorTracker(Mesh(delta_x), exact, params)
    params.error_norm = 2
    e2 = ErrorTracker(Mesh(delta_x), exact, params)
    for i in range(1, 10):
        # the exact result at time i is x + i, so this estimate is off by 0.1 * i
        e1.update(e1.mesh.x + 0.9 * i, float(i), 1.0)
        # in the L1 norm the error should be 0.1 * i
        assert abs(e1.error[i] - i * 0.1) <= 1e-9
        new_val = exact(i)
        new_val[0] = 0  # introduce a difference in the first entry only
        e2.update(new_val, float(i), 1.0)
        # the discrete L2 norm of that error should be 0.01 * i
        assert abs(e2.error[i] - (i * 0.01)) <= 1e-9

def set_matrix(self):
    if self.radio_file.isChecked():
        file_name = QtWidgets.QFileDialog.getOpenFileName(
            self, 'Choose file', os.path.expanduser('~'),
            'Text file (*.txt);;All files (*)')[0]
        if file_name:
            task_type = 'min'
            if self.max_type_radio.isChecked():
                task_type = 'max'
            self.task = Data(file_name, task_type=task_type, from_file=True)
            self.first_city_spin_box.setMaximum(len(self.task.matrix))
    if self.radio_keyboard.isChecked():
        task_type = 'min'
        if self.max_type_radio.isChecked():
            task_type = 'max'
        dialog = DialogView.get_matrx()
        if dialog[0]:
            matrix = np.array(dialog[1])
            self.task = Data(matrix, task_type=task_type)
            self.first_city_spin_box.setMaximum(len(self.task.matrix))

def _test_controller_helper(wave, t_max, delta_x, error_bound,
                            always=False, setup_callback=None):
    # Simple test to make sure the code works correctly
    my_params = Data()
    my_params.delta_x = delta_x
    my_params.plotter = Data()
    my_params.plotter.always_plot = always
    my_params.plotter.never_plot = not interactive_test
    my_params.plotter.plot_interval = 0.5
    my_params.t_max = t_max
    my_params.analytical = wave

    cont = Controller(my_params)
    if setup_callback is not None:
        setup_callback(cont)
    et = ErrorTracker(cont.mesh, cont.analytical, my_params)
    soln_plot = UpdatePlotter(my_params.plotter)
    soln_plot.add_line(cont.mesh.x, cont.init, '+')
    soln_plot.add_line(cont.mesh.x, cont.init, '-')
    cont.observers.append(soln_plot)
    cont.observers.append(et)
    result = cont.compute()
    soln_plot.add_line(cont.mesh.x, cont.exact)

    # check that the scheme is essentially non-oscillatory:
    # total variation <= initial total variation + O(h^2)
    init_tv = Controller.total_variation(cont.init)
    result_tv = Controller.total_variation(result)
    if interactive_test is True:
        pyp.show()
    assert result_tv < init_tv + error_bound

    # check the error
    assert et.error[-1] < error_bound
    return et, soln_plot

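# For reference, a minimal sketch of the total-variation measure that the
# assertions above rely on. The real implementation is
# Controller.total_variation; for a 1D array of cell values it is typically
# just the sum of the absolute jumps between neighboring entries.
import numpy as np

def total_variation(values):
    # TV(u) = sum_i |u[i+1] - u[i]|
    return np.sum(np.abs(np.diff(values)))
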
def __init__(self, params=Data()):
    self.proj_name = "test"
    self.run_name = "test"
    if "proj_name" in params:
        self.proj_name = params.proj_name
    if "run_name" in params:
        self.run_name = params.run_name
    if "material" in params:
        self.material = params.material
    self.params = params
    self.data = Data()
    self.data_loc = None
    self._assign_data_location()
    self._initialize()

import matplotlib
matplotlib.use('Agg')  # the backend must be set before pyplot is imported
import matplotlib.pyplot as plt

from core.utils import get_kfold_index
from core.data import Data
from core.app_config import AppConfig
from core.scoring import scob
from core.models import build_sub_model_1  # assumed location of build_sub_model_1

param = {
    "lr": 1e-04,
    "ut_1": 1024,
    "l1": 0.0,
    "ut_2": 256,
    "l2": 0.00,
    "dp": 0.0,
    'a': 'leaky_relu',
    'inputs_shape': (7,)
}
model = build_sub_model_1(**param)
data = Data()
(x_train, y_train), (x_test, y_test), (x_test_1, y_test_1), \
    (x_test_2, y_test_2) = data.get_channels(['sequence_feature'])
model.fit(x_train[0], y_train, validation_split=0.2, batch_size=32, epochs=50)
score = scob.get_scores(y_test_2[:, 1], model.predict(x_test_2)[:, 1])
print(score)

while task_type not in ('min', 'max'):
    print('Wrong, try again.')
    task_type = input('Enter task type (min or max):\n')
print('How do you want to enter data?\n'
      '\t1) From file;\n'
      '\t2) From keyboard;')
insert_type = input()
while insert_type not in ('1', '2'):
    print('Error, try again\n')
    insert_type = input()
if insert_type == '1':
    print('Careful! The matrix in the file must be square. One line in the file '
          'is one row of the matrix; the entry separator is ";".\n')
    matr_file = input('Enter the full path to the file:\n')
    task = Data(matr_file, task_type, from_file=True)
    task.solve()
    print('Answer:', task.result)
elif insert_type == '2':
    matrix = []
    dim = input('Enter dimension (max=10):\n')
    while int(dim) not in range(1, 11):
        print('Wrong, try again\n')
        dim = input()
    print('Enter matrix (1 row per line, entries separated by whitespace):')
    for i in range(int(dim)):
        row = input()
        matrix.append(list(map(float, row.split(' '))))
    matrix = np.array(matrix)
    task = Data(matrix, task_type)
    start = time.perf_counter()  # time.clock() was removed in Python 3.8

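# A hypothetical example of the file layout described above: a square matrix,
# one row per line, entries separated by ";".
#
#   0;29;20;21
#   29;0;15;17
#   20;15;0;28
#   21;17;28;0
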
import numpy as np
import pickle
import os
from itertools import combinations
from sklearn.model_selection import KFold
import pandas as pd

from core.scoring import get_scores, show_scores
from core.models import *  # tf is assumed to be re-exported by this star import
from core.data import Data
from core.utils import get_kfold_index
from core.app_config import AppConfig

# tf.keras.backend.set_floatx('float32')

data = Data()
config = AppConfig()
config.CUDA_VISIBLE_DEVICES = '0'
config.data = Data()
kfold_index = get_kfold_index()  # indices for cross-validation

# early-stopping settings
cb = tf.keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0.,
                                      patience=10, verbose=2, mode='min',
                                      baseline=None)
params = dict(cv=5, epochs=500, batch_size=32, kfold_index=kfold_index, cb=[cb])
config.extent(params)

from core.data import Data

# ALL IN STANDARD SI UNITS
# all values here come from the wet diabase in Takeuchi & Fialko
wetdiabase = Data()
wetdiabase.density = 2850.0  # kg/m^3
wetdiabase.specific_heat = 1000.0  # J/(kg K)
wetdiabase.activation_energy = 2.6e5  # J/mol
wetdiabase.stress_exponent = 3.4
wetdiabase.creep_constant = 2.2e-4 * 10 ** (-6 * 3.4)  # (Pa^-n)/sec
wetdiabase.thermal_diffusivity = 7.37e-7  # m^2/sec
wetdiabase.youngs_modulus = 80.0e9  # Pa
wetdiabase.poisson = 0.25
wetdiabase.shear_modulus = wetdiabase.youngs_modulus / \
    (2 * (1 + wetdiabase.poisson))
wetdiabase.lame_lambda = (wetdiabase.youngs_modulus * wetdiabase.poisson) / \
    ((1 + wetdiabase.poisson) * (1 - 2 * wetdiabase.poisson))

class Experiment(object):
    """
    The Experiment class provides a general framework for defining a
    numerical experiment and running it. Any experiment should be subclassed
    from this "Experiment" class. The methods to implement are "_initialize",
    "_compute" and "_visualize".

    The class provides methods to save data automatically to a central
    location. By convention, a subclass should never modify the parameters
    passed to it. Use self.data.var_name or self.var_name depending on
    whether the data should be saved or not.
    """

    def __init__(self, params=Data()):
        self.proj_name = "test"
        self.run_name = "test"
        if "proj_name" in params:
            self.proj_name = params.proj_name
        if "run_name" in params:
            self.run_name = params.run_name
        if "material" in params:
            self.material = params.material
        self.params = params
        self.data = Data()
        self.data_loc = None
        self._assign_data_location()
        self._initialize()

    def _initialize(self):
        raise Exception("_initialize is still a stub method")

    def compute(self):
        """ Abstract base for the computation of an experiment """
        return self._compute()

    def _compute(self):
        raise Exception("_compute is still a stub method")

    def visualize(self):
        """ Abstract base for the visualization of an experiment """
        return self._visualize()

    def _visualize(self):
        raise Exception("_visualize is still a stub method")

    def save(self):
        """ Save the data and the parameters """
        self.params.save(self.data_loc + "/params.pkl")
        self.data.save(self.data_loc + "/data.pkl")

    def load(self, filename):
        self.data = Data.load(filename)

    def _assign_data_location(self):
        """
        Assigns a data folder to the experiment. The folder is chosen as
        data_root/proj_name/run_name_timestamp.
        """
        if self.data_loc is not None:
            return
        if MPI.COMM_WORLD.Get_rank() != 0:
            return
        folder_name = data_root + "/" + self.proj_name
        if not os.path.exists(folder_name):
            os.mkdir(folder_name)
        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
        new_run_folder_name = folder_name + "/" + self.run_name + "_" + timestamp
        # We intentionally ignore the case where the folder already exists.
        # This means data may be overwritten, but it is the user's
        # responsibility to make sure this doesn't happen.
        if not os.path.exists(new_run_folder_name):
            os.mkdir(new_run_folder_name)
        self.data_loc = new_run_folder_name

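# A minimal sketch of the subclassing contract described in the docstring
# above. "ConstantTemp" and its internals are hypothetical; the only
# requirement from Experiment is that _initialize, _compute and _visualize
# be overridden.
class ConstantTemp(Experiment):
    def _initialize(self):
        # values stored on self.data are what save() writes to disk
        self.data.temps = None

    def _compute(self):
        # a trivial "computation": a constant temperature field
        self.data.temps = [self.params.init_temp] * self.params.x_count
        return self.data.temps

    def _visualize(self):
        print(self.data.temps)
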
def __init__(self):
    self.data = Data().generate_data()
    self.num_data = cfg.NUM_DATA

from core.update_plotter import UpdatePlotter
from core.error_tracker import ErrorTracker
from core.experiment import Experiment
from rupturotops.controller import Controller
from parameters.material import wetdiabase
from core.constants import consts
assert consts
from core.debug import _DEBUG
assert _DEBUG
import numpy as np
from core.data import Data  # assumed import; Data is used below

# Define the standard parameter data structure.
params = Data()

# What material should we use? Look in the parameters/material file
# to see the options and to see how to define a new material.
params.material = wetdiabase

# Set up the solution domain. First we define the cell spacings;
# every cell gets the same width.
count = 2000
domain_length = 10.0
delta_x = [domain_length / count for _ in range(count)]

import os
from core.models import *  # tf is assumed to be re-exported by this star import
from core.utils import cross_validation, get_kfold_index
from core.visualization import plot_cv_out, plot_roc_curve_on_ax
import matplotlib
matplotlib.use('Agg')  # the backend must be set before pyplot is imported
import matplotlib.pyplot as plt
from core.data import Data
from core.app_config import AppConfig

# configuration parameters
config = AppConfig()
config.CUDA_VISIBLE_DEVICES = '0'
config.data = Data()
kfold_index = get_kfold_index()  # indices for cross-validation

# early-stopping settings
bc = tf.keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0.,
                                      patience=10, verbose=2, mode='min',
                                      baseline=None)
params = dict(cv=5, epochs=500, batch_size=32, kfold_index=kfold_index, bc=bc)
config.extent(params)


def train_a_model(data, model, output_dir,

def load(self, filename):
    self.data = Data.load(filename)

from core.data import Data

# ALL IN STANDARD SI UNITS
# all values here come from the wet diabase in Takeuchi & Fialko
wetdiabase = Data()
wetdiabase.density = 2850.0  # kg/m^3
wetdiabase.specific_heat = 1000.0  # J/(kg K)
wetdiabase.activation_energy = 2.6e5  # J/mol
wetdiabase.stress_exponent = 3.4
wetdiabase.creep_constant = 2.2e-4 * 10**(-6 * 3.4)  # (Pa^-n)/sec
wetdiabase.thermal_diffusivity = 7.37e-7  # m^2/sec
wetdiabase.youngs_modulus = 80.0e9  # Pa
wetdiabase.poisson = 0.25
wetdiabase.shear_modulus = wetdiabase.youngs_modulus / (
    2 * (1 + wetdiabase.poisson))
wetdiabase.lame_lambda = (wetdiabase.youngs_modulus * wetdiabase.poisson) / \
    ((1 + wetdiabase.poisson) * (1 - 2 * wetdiabase.poisson))

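# Quick sanity check on the derived elastic moduli (not part of the original
# file): with E = 80 GPa and nu = 0.25, both mu and lambda work out to 32 GPa.
assert abs(wetdiabase.shear_modulus - 32.0e9) < 1.0
assert abs(wetdiabase.lame_lambda - 32.0e9) < 1.0
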
from core.data import Data
import numpy as np
from material import wetdiabase
from experiments.diffusive import Diffusive
from core.constants import consts

params = Data()
params.material = wetdiabase
params.proj_name = 'diffusive'
params.run_name = 'test'
params.low_x = 0.0
params.high_x = 0.1
params.low_t = 0.001 * consts.secs_in_a_year
params.high_t = 1000000.0 * consts.secs_in_a_year
params.x_count = 25
params.t_count = 100
params.x_domain = np.linspace(params.low_x, params.high_x, params.x_count)
params.t_domain = np.linspace(params.low_t, params.high_t, params.t_count)
params.time_scale = 100.0 * consts.secs_in_a_year  # 100 years
params.init_temp = 350.0 + 273.0  # initial temperature (K)
params.delta_t = 2.5  # total increase in temperature for an event
params.stress = 100.0e6  # Pa (100 MPa)

experiment = Diffusive

def main(): print("Note: this one doesn't collect any password or username\n" + "This is only for personal use.") user = manager.chaeck_data() user_seprate = manager.spilit_data(user) username = user_seprate[0] password = user_seprate[1] data = Data() # data contains urls and params now = datetime.datetime.now() # Now datetime events_list = list_events_summary() # List of events in calendar # Get name and token token = baddyrequest.send_token(data, username, password) profile = baddyrequest.send_profile(data, token) # str to dict profile = literal_eval(profile) # Show name print(profile['FirstName'] + ' ' + profile['LastName']) # Get lessons and count the number of them lessons = baddyrequest.send_lessons(data, token) lessons = literal_eval(lessons) lessons = lessons[0]['Lessons'] # number_lessons = len(lessons) for les in lessons: number = les['GroupID'] practice_url = f"https://yaraapi.mazust.ac.ir/api/practices/actives/{number}" practices = baddyrequest.send(data, practice_url, token) for prac in practices: # Convert date by jalali finish_date = prac['FinishDate'] finish_date = conv.jalali_converter(finish_date) # Finish time contains hour and minute finish_time = prac['FinishTime'] # Seprate hour and minute finish_hour = int(finish_time[:2]) finish_minute = int(finish_time[:2]) # Add time to datetime.datetime finish_datetime = finish_date.replace(minute=finish_minute, hour=finish_hour) # Start DateTime start_date = prac['StartDate'] start_time = prac['StartTime'] # Convert date by jalali start_date = conv.jalali_converter(start_date) # Seprate hour and minute start_hour = int(start_time[:2]) start_minute = int(start_time[:2]) # Add time to datetime.datetime start_datetime = start_date.replace(minute=start_minute, hour=start_hour) title = prac['Title'] description = prac['Description'] body = { "summary": f"{les['LessonTitle']} - {title}", "description": description, "start": { "dateTime": start_datetime.isoformat(), "timeZone": 'Asia/Tehran' }, "end": { "dateTime": finish_datetime.isoformat(), "timeZone": 'Asia/Tehran' }, } if (finish_datetime >= now) and (body['summary'] not in events_list): create(body)
import core
import scripts
from core.data import Data
from core.text import TextConverter

if __name__ == '__main__':
    # Initialize the application context
    core.init_context("config.yaml")
    # Build the database
    scripts.init_data_base(core.csv_tpl, core.pdbc_tpl)
    data_dict = {
        '城市数据': core.pdbc_tpl.query_table("t_city_data"),  # city data
        '市场数据': core.pdbc_tpl.query_table("t_market_data"),  # market data
        '景气指数': core.pdbc_tpl.query_table("t_prosperity_index"),  # prosperity index
        '量价指数': core.pdbc_tpl.query_table("t_volume_price_index"),  # volume-price index
        '旅客规模': core.pdbc_tpl.query_table("t_passenger_size"),  # passenger size
        '旅客特征': core.pdbc_tpl.query_table("t_passenger_characteristics"),  # passenger characteristics
        '日均旅客量': core.pdbc_tpl.query_table("t_average_daily_passenger_volume"),  # average daily passenger volume
    }
    # Generate the images
    scripts.generate_img(core.word_tpl, data_dict)
    # Generate the text
    text = TextConverter(core.word_tpl, Data(data_dict))
    # Generate the document
    scripts.generate_word(core.word_tpl, "2017年航指数半年白皮书—发布版.docx", text)

def __init__(self):
    self.data, self.label = Data().generate_data()
    print((self.label ** 2).sum() / 1000)
    self.data_num = cfg.DATA_NUM

def get_data_loader(experiment_name):
    data_args = pickle.load(
        open('experiment_settings/{}.p'.format(experiment_name), 'rb'))['data_args']
    return Data(**data_args)

import os
import sys

from core.data import Data
from core.train_model import get_trained_model


def append_to_losses(expt_name, dataset, loss,
                     filename='final_losses_{}.csv'.format(sys.argv[2])):
    with open(filename, 'a') as f:
        f.write('{},{},{}\n'.format(expt_name, dataset, loss))


RESULT_DIR = os.environ.get('RESULT_DIR', 'results')

data = Data(sequence_length=int(sys.argv[2]), data_suffix='_full')
m = get_trained_model(sys.argv[1])
print('evaluating model', flush=True)
loss = m.evaluate(*data.get_data('test'))
print('saving results', flush=True)
append_to_losses(sys.argv[1], 'test', loss)