Exemple #1
0
def main():
    """Smoke-test entry point: configure logging/Redis from CLI flags and
    print one hard-coded Picasa album.

    Required flags: --log_path, --config_path, --name. Redis options default
    to a local instance; --max_results/--period are parsed but unused here.
    """
    parser = argparse.ArgumentParser(prog='G+RSS.Poller')
    parser.add_argument('--redis_port', default=6379, type=int)
    parser.add_argument('--redis_host', default='127.0.0.1')
    parser.add_argument('--redis_db', default=0, type=int)
    parser.add_argument('--log_path', required=True)
    parser.add_argument('--config_path', required=True)
    parser.add_argument('--max_results', default=4, type=int)
    parser.add_argument('--name', required=True)
    parser.add_argument('--period', default=900, type=int)
    args = parser.parse_args()

    logging.basicConfig(format='%(asctime)s %(message)s',
                        datefmt='%m/%d/%Y %H:%M:%S')
    logger = logging.getLogger(__name__)
    logger.addHandler(
        config.getLogHandler(os.path.join(args.log_path, 'poller_test.log')))
    # Use the public setter instead of assigning the .level attribute
    # directly; setLevel() also invalidates logging's internal caches.
    logger.setLevel(logging.DEBUG)

    # `data` is not used below, but the constructor is kept for its
    # connection side effects — presumably intentional; confirm.
    data = Data(logger, args.redis_host, args.redis_port, args.redis_db)

    picasa = Picasa(logger, args.config_path)

    # Hard-coded user id / album id — this is a one-off test fetch.
    album = picasa.get_album('113347540216001053968', '5963913461943227665')

    print(album)
Exemple #2
0
def main():
    """Entry point: configure logging/Redis from CLI flags and run the
    poller loop with a mock Google provider.

    Required flags: --log_path, --config_path, --name. --period is the poll
    interval in seconds (default 900); --max_results is parsed but unused.
    """
    parser = argparse.ArgumentParser(prog='G+RSS.Poller')
    parser.add_argument('--redis_port', default=6379, type=int)
    parser.add_argument('--redis_host', default='127.0.0.1')
    parser.add_argument('--redis_db', default=0, type=int)
    parser.add_argument('--log_path', required=True)
    parser.add_argument('--config_path', required=True)
    parser.add_argument('--max_results', default=4, type=int)
    parser.add_argument('--name', required=True)
    parser.add_argument('--period', default=900, type=int)
    args = parser.parse_args()

    logging.basicConfig(format='%(asctime)s %(message)s',
                        datefmt='%m/%d/%Y %H:%M:%S')
    logger = logging.getLogger(__name__)
    logger.addHandler(
        config.getLogHandler(os.path.join(args.log_path, 'poller_test.log')))
    # Use the public setter instead of assigning the .level attribute
    # directly; setLevel() also invalidates logging's internal caches.
    logger.setLevel(logging.DEBUG)

    data = Data(logger, args.redis_host, args.redis_port, args.redis_db)
    # Mock provider — this looks like a test harness for the real poller.
    providers = {'google': GooglePollMock(logger, data)}

    pol = Poller(logger=logger, name=args.name, data=data, providers=providers)

    pol.poll(args.period)
Exemple #3
0
    def __init__(self):
        """Set up training state: generated data, model, and cfg-driven
        hyper-parameters."""
        self.data, self.label = Data().generate_data()
        self.model = LinearModel()

        # Optimisation hyper-parameters, all sourced from the config module.
        self.max_step = cfg.MAX_STEP
        self.learning_rate = cfg.LEARNING_RATE
        self.stop_condition = cfg.STOP_CONDITION
        self.global_step = cfg.GLOBAL_STEP

        # Mutable training state: current weights and per-step cost history.
        self.weights = cfg.INIT_WEIGHTS
        self.cost = []
Exemple #4
0
    def __init__(self):
        """Set up training state: generated data, model, and cfg-driven
        hyper-parameters."""
        self.data = Data().generate_data()
        self.model = LinearModel()

        # Hyper-parameters pulled from the config module.
        self.learning_rate = cfg.LEARNING_RATE
        self.num_data = cfg.NUM_DATA
        self.max_step = cfg.MAX_STEP
        self.stop_condition = cfg.STOP_CONDITION

        # Mutable training state.
        self.weights = cfg.INIT_W
        self.cost = []
        self.global_step = 0
def save_experiment_design(model_class, experiment_name, model_args=None, data_args=None, train_settings=None,
                           callback_settings=None, check_model=False, check_data=False):
  """Pickle an experiment's configuration to experiment_settings/<name>.p.

  Args:
    model_class: class (or identifier) stored and, when check_model is True,
      instantiated with **model_args to verify it compiles.
    experiment_name: basename of the settings pickle.
    model_args/data_args/train_settings/callback_settings: optional dicts
      stored verbatim; default to fresh empty dicts per call.
    check_model: if True, build model_class(**model_args).get_compiled_model().
    check_data: if True, build Data(**data_args) to validate the data args.
  """
  # The original used mutable default arguments ({}), which are shared
  # across calls; create fresh dicts instead.
  model_args = {} if model_args is None else model_args
  data_args = {} if data_args is None else data_args
  train_settings = {} if train_settings is None else train_settings
  callback_settings = {} if callback_settings is None else callback_settings
  to_save = {'model_class': model_class,
             'model_args': model_args,
             'data_args': data_args,
             'train_settings': train_settings,
             'callback_settings': callback_settings}
  if check_model:
    model_class(**model_args).get_compiled_model()
  if check_data:
    Data(**data_args)
  os.makedirs('experiment_settings', exist_ok=True)
  # Context manager closes the file even if pickling raises (the original
  # leaked the handle from an inline open()).
  with open('experiment_settings/{}.p'.format(experiment_name), 'wb') as f:
    pickle.dump(to_save, f)
 def set_matrix(self):
     """Build self.task (a Data problem instance) from either a file or a
     keyboard-entered matrix, depending on which input radio is checked,
     and cap the first-city spin box at the matrix size.
     """
     # File-based input: pick a .txt matrix file via a native dialog.
     if self.radio_file.isChecked():
         file_name = QtWidgets.QFileDialog.getOpenFileName(
             self, 'Choose file', os.path.expanduser('~'),
             'Text file (*.txt);;All files (*)')[0]
         # Empty string means the dialog was cancelled.
         if file_name:
             # Optimisation direction from the min/max radio pair.
             task_type = 'min'
             if self.max_type_radio.isChecked():
                 task_type = 'max'
             self.task = Data(file_name,
                              task_type=task_type,
                              from_file=True)
             self.first_city_spin_box.setMaximum(len(self.task.matrix))
     # Keyboard input: collect the matrix through a dialog.
     # NOTE(review): plain `if`, not `elif` — presumably the two radios are
     # mutually exclusive in the UI; confirm, else both branches could run.
     if self.radio_keyboard.isChecked():
         task_type = 'min'
         if self.max_type_radio.isChecked():
             task_type = 'max'
         # dialog is (accepted_flag, matrix_rows) — TODO confirm shape.
         dialog = DialogView.get_matrx()
         if dialog[0]:
             matrix = np.array(dialog[1])
             self.task = Data(matrix, task_type=task_type)
             self.first_city_spin_box.setMaximum(len(self.task.matrix))
Exemple #7
0
def main():
    """Load and vectorise the configured dataset, then train a VAE on it."""
    dataset_path = os.path.join(CONFIGS['data']['dir'],
                                CONFIGS['data']['filename'])

    # 70/15/15 train/valid/test split.
    corpus = Data(dataset_path, split=[0.7, 0.15, 0.15])
    corpus.preprocess()
    corpus.vectorize()

    # Two loaders over the training split: batched for optimisation and
    # single-sample for per-example passes; dev set is evaluated one at a time.
    batched_train = DataLoader(corpus.train(), batch_size=128)
    single_train = DataLoader(corpus.train(), batch_size=1)
    dev_loader = DataLoader(corpus.valid(), batch_size=1)

    model = VAE(corpus.input_dim_(), 256, 128, device).to(device)
    model.train(batched_train, single_train, dev_loader)
Exemple #8
0
def calculate_predictions(experiment_name, dataset):
    """Load the data and trained model for *experiment_name*, compute
    predictions on *dataset*, and save them under RESULT_DIR.

    Args:
      experiment_name: experiment id; the three baseline names load the
        full dataset directly, anything else restores its pickled data args.
      dataset: split name passed to data.get_data() (e.g. 'test').
    """
    print('Loading data... ', flush=True)
    if experiment_name in ['danq', 'deepsea', 'danqjaspar']:
        data = Data(data_suffix='_full')
    else:
        data = get_data_loader(experiment_name)
    # Both branches expose the same accessor, so fetch once here instead of
    # duplicating the call in each branch (y is unused by make_predictions).
    X, y = data.get_data(dataset)

    print('Loading model... ', flush=True)
    model = get_trained_model(experiment_name)
    print('Calculating predictions... ', flush=True)
    make_predictions(model,
                     X,
                     join(
                         RESULT_DIR, 'predictions-best',
                         '{}-{}{}.npy'.format(experiment_name, dataset,
                                              data.suffix)),
                     verbose=1)
Exemple #9
0
import core
import scripts
from core.data import Data
from core.text import TextConverter

if __name__ == '__main__':
    # Initialise the application context from the YAML config file.
    core.init_context("config.yaml")
    # Build the database from the CSV sources.
    scripts.init_data_base(core.csv_tpl, core.pdbc_tpl)
    # Section title (Chinese) -> table rows. The keys are consumed by the
    # report templates downstream, so they must stay exactly as written.
    data_dict = {
        '城市数据': core.pdbc_tpl.query_table("t_city_data"),
        '市场数据': core.pdbc_tpl.query_table("t_market_data"),
        '景气指数': core.pdbc_tpl.query_table("t_prosperity_index"),
        '量价指数': core.pdbc_tpl.query_table("t_volume_price_index"),
        '旅客规模': core.pdbc_tpl.query_table("t_passenger_size"),
        '旅客特征': core.pdbc_tpl.query_table("t_passenger_characteristics"),
        '日均旅客量': core.pdbc_tpl.query_table("t_average_daily_passenger_volume"),
    }
    # Generate the report images.
    scripts.generate_img(core.word_tpl, data_dict)
    # Generate the report text.
    text = TextConverter(core.word_tpl, Data(data_dict))
    # Render the final Word document (filename is the published title).
    scripts.generate_word(core.word_tpl, "2017年航指数半年白皮书—发布版.docx", text)
Exemple #10
0
from core.data import Data

# All values in standard SI units.
# Material constants for wet diabase, taken from Takeuchi & Fialko.
wetdiabase = Data()
wetdiabase.density = 2850.0  # kg/m^3
wetdiabase.specific_heat = 1000.0  # J/kgK
wetdiabase.activation_energy = 2.6e5  # J/mol
wetdiabase.stress_exponent = 3.4
# NOTE(review): the 10**(-6 * 3.4) factor presumably converts a published
# MPa^-n constant to Pa^-n (n = 3.4, the stress exponent) — confirm
# against the source paper.
wetdiabase.creep_constant = 2.2e-4 * 10**(-6 * 3.4)  # (Pa^-n)/sec
wetdiabase.thermal_diffusivity = 7.37e-7  # m^2/sec
wetdiabase.youngs_modulus = 80.0e9  # Pa
wetdiabase.poisson = 0.25
# Standard isotropic elasticity relations:
# shear modulus G = E / (2 * (1 + nu))
wetdiabase.shear_modulus = wetdiabase.youngs_modulus / (
    2 * (1 + wetdiabase.poisson))
# Lame's first parameter lambda = E * nu / ((1 + nu) * (1 - 2 * nu))
wetdiabase.lame_lambda = (wetdiabase.youngs_modulus * wetdiabase.poisson) /\
    ((1 + wetdiabase.poisson) * (1 - 2 * wetdiabase.poisson))
Exemple #11
0
import numpy as np
import pickle
import os
from sklearn.model_selection import KFold
import pandas as pd
from core.scoring import get_scores, show_scores
from core.models import *
# tf.keras.backend.set_floatx('float32')
from itertools import combinations
from core.data import Data
from core.utils import get_kfold_index
from core.app_config import AppConfig

# Experiment setup: wire data, device selection and k-fold training
# parameters into the shared AppConfig.
# NOTE(review): this first Data() instance is never used below
# (config.data gets its own) — possibly for a constructor side effect.
data = Data()
config = AppConfig()
config.CUDA_VISIBLE_DEVICES = '0'
config.data = Data()

kfold_index = get_kfold_index()  # cross-validation fold indices
cb = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                      min_delta=0.,
                                      patience=10,
                                      verbose=2,
                                      mode='min',
                                      baseline=None)  # early-stopping settings
params = dict(cv=5,
              epochs=500,
              batch_size=32,
              kfold_index=kfold_index,
              cb=[cb])
# Merge the training params into the config (project API; the name is
# presumably a typo for "extend" — confirm before renaming).
config.extent(params)
Exemple #12
0
 # Interactive entry: re-prompt until a valid optimisation direction is
 # given (task_type is initialised above this chunk).
 while task_type not in ('min', 'max'):
     print('Wrong, try again.')
     task_type = input('Enter task type(min or max):\n')
 print('How you want to enter data?:\n'
       '\t1)From file;\n'
       '\t2)From keyboard;')
 insert_type = input()
 while insert_type not in ('1', '2'):
     insert_type = input()
     if insert_type not in ('1', '2'):
         print('Error, try again\n')
 if insert_type == '1':
     # File input: square matrix, one row per line, ';'-separated entries.
     print('Careful! Matrix in file should be square. One string in file - one row of matrix, '
           'entry splitter - ";".\n')
     matr_file = input('Enter path to file(full):\n')
     task = Data(matr_file, task_type, from_file=True)
     task.solve()
     print('Answer:', task.result)
 elif insert_type == '2':
     # Keyboard input: read dimension (1..10), then the matrix row by row.
     matrix = []
     dim = input('Enter dimension(max=10):\n')
     while int(dim) not in range(1, 11):
         print('Wrong, try again\n')
         dim = input()
     print('Enter matrix(1 row per string, entries splitter is whitespace:')
     for i in range(int(dim)):
         row = input()
         matrix.append(list(map(float, row.split(' '))))
     matrix = np.array(matrix)
     task = Data(matrix, task_type)
     # time.clock() was removed in Python 3.8; perf_counter() is the
     # drop-in monotonic replacement for elapsed-time measurement.
     start = time.perf_counter()
Exemple #13
0
import os, sys
from core.data import Data
from core.train_model import get_trained_model


def append_to_losses(expt_name,
                     dataset,
                     loss,
                     filename=None):
    """Append one CSV row 'expt_name,dataset,loss' to *filename*.

    When *filename* is omitted it defaults to final_losses_<sys.argv[2]>.csv,
    resolved at call time. The original computed this default at import
    time, which both froze the value and crashed on import whenever fewer
    than three argv entries were present (e.g. under a test runner).
    """
    if filename is None:
        filename = 'final_losses_{}.csv'.format(sys.argv[2])
    with open(filename, 'a') as f:
        f.write('{},{},{}\n'.format(expt_name, dataset, loss))


RESULT_DIR = os.environ.get('RESULT_DIR', 'results')

# Evaluate the trained model named by argv[1] on the held-out test split
# (sequence length from argv[2]) and append the loss to the results CSV.
dataset = Data(sequence_length=int(sys.argv[2]), data_suffix='_full')
model = get_trained_model(sys.argv[1])
print('evaluating model', flush=True)
test_loss = model.evaluate(*dataset.get_data('test'))
print('saving results', flush=True)
append_to_losses(sys.argv[1], 'test', test_loss)
Exemple #14
0
 def __init__(self):
     """Load generated samples/labels and cache the configured sample count."""
     self.data, self.label = Data().generate_data()
     # Mean squared label value — assumes 1000 samples; presumably should
     # match cfg.DATA_NUM — TODO confirm.
     print((self.label ** 2).sum() / 1000)
     self.data_num = cfg.DATA_NUM
Exemple #15
0
import matplotlib.pyplot as plt
from core.utils import get_kfold_index
from core.data import Data
from core.app_config import AppConfig
from core.scoring import scob

# `import matplotlib.pyplot as plt` (above) binds only the name `plt`,
# not `matplotlib`, so the bare name used below raised NameError.
# Import it explicitly before selecting the headless backend.
import matplotlib

matplotlib.use('Agg')

# Hyper-parameters for the sub-model: layer widths, L1/L2 penalties,
# dropout, activation and input shape.
param = {
    "lr": 1e-04,
    "ut_1": 1024,
    "l1": 0.0,
    "ut_2": 256,
    "l2": 0.00,
    "dp": 0.0,
    'a': 'leaky_relu',
    'inputs_shape': (7, )
}

model = build_sub_model_1(**param)

# Train on the sequence-feature channel and score on the second test split.
data = Data()
(x_train, y_train), (x_test,
                     y_test), (x_test_1,
                               y_test_1), (x_test_2,
                                           y_test_2) = data.get_channels(
                                               ['sequence_feature'])
model.fit(x_train[0], y_train, validation_split=0.2, batch_size=32, epochs=50)

score = scob.get_scores(y_test_2[:, 1], model.predict(x_test_2)[:, 1])
print(score)
Exemple #16
0
def main():
    """Fetch active practice assignments from the Yara API and create a
    calendar event for each upcoming one that is not already present.
    """
    print("Note: this one doesn't collect any password or username\n" +
          "This is only for personal use.")

    # Credentials come from the local manager helper.
    user = manager.chaeck_data()
    user_seprate = manager.spilit_data(user)
    username = user_seprate[0]
    password = user_seprate[1]

    data = Data()  # data contains urls and params
    now = datetime.datetime.now()  # current local datetime
    events_list = list_events_summary()  # summaries of existing events

    # Authenticate and fetch the profile.
    token = baddyrequest.send_token(data, username, password)
    profile = baddyrequest.send_profile(data, token)
    # NOTE(review): responses are parsed with literal_eval, which only
    # accepts Python literals — confirm the API never returns JSON
    # (true/false/null would raise here).
    profile = literal_eval(profile)

    # Show the user's name.
    print(profile['FirstName'] + ' ' + profile['LastName'])

    # Fetch enrolled lessons.
    lessons = baddyrequest.send_lessons(data, token)
    lessons = literal_eval(lessons)
    lessons = lessons[0]['Lessons']

    for les in lessons:
        number = les['GroupID']
        practice_url = f"https://yaraapi.mazust.ac.ir/api/practices/actives/{number}"
        practices = baddyrequest.send(data, practice_url, token)

        for prac in practices:
            # Deadline: convert the Jalali date, then attach the clock time.
            finish_date = conv.jalali_converter(prac['FinishDate'])
            finish_time = prac['FinishTime']
            # BUG FIX: the original parsed the minute from the HOUR digits
            # (time[:2]) for both fields. Times look like 'HH:MM[:SS]' —
            # TODO confirm format with the API.
            finish_hour, finish_minute = (
                int(part) for part in finish_time.split(':')[:2])
            finish_datetime = finish_date.replace(minute=finish_minute,
                                                  hour=finish_hour)

            # Start datetime, same conversion.
            start_date = conv.jalali_converter(prac['StartDate'])
            start_time = prac['StartTime']
            start_hour, start_minute = (
                int(part) for part in start_time.split(':')[:2])
            start_datetime = start_date.replace(minute=start_minute,
                                                hour=start_hour)

            title = prac['Title']
            description = prac['Description']
            body = {
                "summary": f"{les['LessonTitle']} - {title}",
                "description": description,
                "start": {
                    "dateTime": start_datetime.isoformat(),
                    "timeZone": 'Asia/Tehran'
                },
                "end": {
                    "dateTime": finish_datetime.isoformat(),
                    "timeZone": 'Asia/Tehran'
                },
            }

            # Only create events that are still upcoming and not already
            # present in the calendar.
            if (finish_datetime >= now) and (body['summary']
                                             not in events_list):
                create(body)
Exemple #17
0
def get_data_loader(experiment_name):
    """Restore the pickled data_args for *experiment_name* and build its
    Data loader.

    Reads experiment_settings/<experiment_name>.p and returns
    Data(**data_args).
    """
    # Context manager closes the settings file even if unpickling raises;
    # the original leaked the handle from an inline open().
    with open('experiment_settings/{}.p'.format(experiment_name), 'rb') as f:
        data_args = pickle.load(f)['data_args']
    return Data(**data_args)
Exemple #18
0
 def __init__(self):
     """Load generated data and cache the configured sample count."""
     self.data = Data().generate_data()
     # Sample count from the shared config module.
     self.num_data = cfg.NUM_DATA