Example #1
    def scheduler(self):
        configs = get_configs()
        for device in configs['devices']:
            if device.get('status') and 'operation_time' in device:
                period = device['operation_time']['period']
                if isinstance(period, int) and period != 0:
                    period_steps = set(
                        device['operation_time']['period_steps'])
                    pin = device['pin'] if 'pin' in device else None
                    self.init_driver(device)
                    mcron.insert(
                        period, period_steps, device['uuid'],
                        self.callback(device['driver'],
                                      "publication",
                                      device['uuid'],
                                      pin,
                                      None,
                                      identifier=None,
                                      scheduling=False))
                else:
                    log("Scheduler: period of " + device['driver'] +
                        " driver is invalid")

        schedules = self.get_schedules()
        for scheduling in schedules:
            must_be_stored = False  # it's already persistent
            self.process_scheduling_create(scheduling, configs['devices'],
                                           must_be_stored)
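Note that mcron.insert is handed the *result* of self.callback(...), so self.callback is presumably a factory that builds the callable mcron invokes on each scheduled step. A minimal sketch of such a factory (hypothetical; the real method also drives the device driver), assuming mcron's usual (callback_id, current_time, callback_memory) callback signature:

    def callback(self, driver, action, uuid, pin, value,
                 identifier=None, scheduling=False):
        # Hypothetical factory: returns the callable that mcron will run.
        def _run(callback_id, current_time, callback_memory):
            # Execute the scheduled operation for this device.
            log("mcron: {} on {} (driver {})".format(action, uuid, driver))
        return _run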
Example #2
    def process_scheduling_update(self, new_scheduling):
        index, schedules = self.get_scheduling(new_scheduling['identifier'])

        if index != -1:
            must_be_stored = True
            configs = get_configs()
            scheduling = schedules[index]

            self.delete_scheduling(new_scheduling['identifier'])  # remove old
            scheduling.update(new_scheduling['data'])
            self.process_scheduling_create(scheduling, configs['devices'],
                                           must_be_stored)  # insert updated

            return json.dumps({
                "data": "successful update",
                "type": "scheduling_reply",
                "gathered_at": get_date(),
                "identifier": scheduling['identifier']
            })
        else:
            return json.dumps({
                "data":
                "update failed, the scheduling does not exist or has already been processed",
                "type": "scheduling_reply",
                "gathered_at": get_date(),
                "identifier": new_scheduling['identifier']
            })
Example #3
    def __init__(self):
        self.dir_root = os.path.dirname(os.path.abspath(__file__))
        
        fname_settings = os.path.join(self.dir_root, 'settings.yaml')
        self.configs = get_configs(fname_settings)

        self.url_homepage = self.configs['URL']['HOMEPAGE']
        self.headers_base = self.configs['HEADERS']['BASE']

        self.url_login = self.configs['URL']['LOGIN']
        self.email = self.configs['AUTH']['EMAIL']
        self.password = self.configs['AUTH']['PASSWORD']
        self.payload_login = {
            'email': self.email,
            'password': self.password,
            'rememberme': 'y',
        }

        self.url_questions = self.configs['URL']['QUESTIONS']
        self.payload_question = self.configs['PAYLOAD']['QUESTION']

        self.url_question_prefix = self.configs['URL']['QUESTION_PREFIX']

        self.timeout_query = self.configs['TIMEOUT']['QUERY']

        self.offset = self.configs['OFFSET']

        self.spider = requests.Session()
Example #4
    def _load_credentials(self, vault_name=None, account_id=None):
        """
        It loads the vault name and account id from the configuration, then
        it overrides the configuration with the cli options (if entered).
        Finally, it prompts for the account password and opens the vault.

        Params:
        :param vault_name: The name of the vault
        :type: String
        :param account_id:
        :type: String

        TODO: Move the print and die to utils
        """
        config = get_configs(self.config_file)

        if vault_name:
            self.vault_name = vault_name
        elif config.get('vault_name'):
            self.vault_name = config.get('vault_name')
        else:
            print('Vault not specified')
            exit()

        if account_id:
            self.account_id = account_id
        elif config.get('account_id'):
            self.account_id = config.get('account_id')
        else:
            print('Account not specified')
            exit()

        self.password = getpass()
        return self.open_vault(self.account_id, self.password, self.vault_name)
Example #5
def main():
    configs = get_configs()
    streamers: dict[str, Streamer] = {}
    providers_names = list(configs.providers.keys())

    if not providers_names:
        raise Exception('No streamers provided')

    if 'yandex_music' in configs.providers:
        ym_configs = configs.providers['yandex_music']
        streamers['yandex_music'] = YMStreamer(
            ym_configs['title'],
            username=ym_configs.get('username'),
            password=ym_configs.get('password'),
            token=ym_configs.get('token'),
            cache=ym_configs['cache'],
            debug=configs.debug,
        )

    streamers[providers_names[0]].run()

    if 'http' in configs.controllers:
        from controllers.http import http_handler
        runController(http_handler, streamers, {
            **configs.controllers['http'], "debug": configs.debug
        })

    if 'cli' in configs.controllers:
        from controllers.cli import cli_handler
        runController(cli_handler, streamers)

    keep_main_thread_alive()
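For reference, here is one hypothetical configs object that would satisfy main() above (attribute and key names inferred from the code; the real get_configs may build it differently):

from types import SimpleNamespace

configs = SimpleNamespace(
    debug=False,
    providers={
        'yandex_music': {
            'title': 'Yandex Music',
            'token': 'YM_OAUTH_TOKEN',  # or username/password instead
            'cache': '/tmp/ym_cache',
        },
    },
    controllers={
        'http': {'host': '0.0.0.0', 'port': 8080},
    },
)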
Example #6
def evaluate_and_visualize(config_name,
                           model_path,
                           output_path,
                           gene_variant=None):
    # Prepare tokenizer, dataset, and model
    configs = get_configs(config_name, verbose=False)
    if configs['use_gene_features']:
        assert gene_variant is not None
        configs['gene_variant'] = gene_variant
    tokenizer = BertTokenizer.from_pretrained(configs['transformer'],
                                              do_basic_tokenize=False)
    train_set, dev_set, test_set = load_oneie_dataset(
        configs['base_dataset_path'], tokenizer)
    model = BasicCorefModel(configs)

    # Reload the model and evaluate
    checkpoint = torch.load(model_path)
    model.load_state_dict(checkpoint['model_state_dict'])
    print('Evaluation on the dev set', flush=True)
    evaluate(model, dev_set, configs)
    print('Evaluation on the test set', flush=True)
    evaluate(model, test_set, configs)

    # Generate visualizations (for the test set)
    generate_coref_preds(model, test_set, '_predictions.json')
    generate_visualizations('_predictions.json', output_path)
    os.remove('_predictions.json')
Example #8
def main():
    config = utils.get_configs()
    if not config['error']:
        params = config['return']
        utils.run_script(params['manga_name'], params['manga_chapters'],
                         params['manga_path'])
    else:
        for e in config['error']:
            print(e)
Example #9
    def __init__(self, subscribe_stack):
        configs = get_configs()

        self.ip = configs['broker_mqtt']['ip']
        self.port = configs['broker_mqtt']['port']
        self.user = configs['broker_mqtt']['user']
        self.password = configs['broker_mqtt']['pass']
        self.topic = bytes("GW_{}".format(configs['gateway']['uuid']), "utf-8")
        self.uuid = configs['gateway']['uuid']

        self.subscribe_stack = subscribe_stack
Example #10
    def __init__(self, publish_stack):
        configs = get_configs()

        self.ip = configs['broker_mqtt']['ip']
        self.port = configs['broker_mqtt']['port']
        self.user = configs['broker_mqtt']['user']
        self.password = configs['broker_mqtt']['pass']
        self.topic = bytes(configs['broker_mqtt']['topic'], "utf-8")
        self.uuid = configs['gateway']['uuid']

        self.publish_stack = publish_stack
        self.wdt = WDT(timeout=1000 * 60 * 15)  # 15-minute watchdog
Example #11
    def __init__(self):
        configs = get_configs()

        self.project_name = configs['project_name']
        self.ap_ssid = self.project_name
        self.ap_password = ""
        self.ap_authmode = 0  # 0 = open network (no authentication)
        self.wlan_ap = network.WLAN(network.AP_IF)
        self.wlan_sta = network.WLAN(network.STA_IF)
        self.server_socket = None

        self.get_connection()
Example #12
    def __init__(self, publish_stack):
        configs = get_configs()

        self.ip = configs['broker_mqtt']['ip']
        self.port = configs['broker_mqtt']['port']
        self.user = configs['broker_mqtt']['user']
        self.password = configs['broker_mqtt']['pass']
        self.topic = bytes(configs['broker_mqtt']['topic'], "utf-8")
        self.uuid = configs['gateway']['uuid']

        self.publish_stack = publish_stack
        self.devices = configs['devices']
        self.startWDT()
Example #13
    def __init__(self):
        configs = get_configs()

        self.token = configs['git']['token']
        self.url = configs['git']['url']
        self.uuid = configs['gateway']['uuid']

        self.http_client = HttpClient(
            headers={'Authorization': 'token {}'.format(self.token)})
        self.github_repo = self.url.rstrip('/').replace(
            'https://github.com', 'https://api.github.com/repos')
        self.main_dir = "main"
        self.module = ''

        self.download_updates_if_available()
        self.apply_pending_updates_if_available()
Example #14
    def start(self):
        log("Scheduler: EXEHDAGateway operating")

        configs = get_configs()
        configs.update({"type": "identification", "gathered_at": get_date()})
        self.publish_stack.insert(json.dumps(configs))

        while True:
            try:
                while self.subscribe_stack.length() > 0:
                    reply = None
                    data = json.loads(self.subscribe_stack.get())
                    self.subscribe_stack.delete()

                    if 'type' in data:
                        subscription_type = data['type']
                        configs = get_configs()

                        if subscription_type == "acknowledgement":
                            reply = self.process_acknowledgement(configs)

                        elif subscription_type == "operation":
                            if 'uuid' in data and 'identifier' in data:
                                device = self.get_device(
                                    configs['devices'], data['uuid'])
                                if device:
                                    reply = self.process_operation(
                                        data, device)
                                else:
                                    reply = json.dumps({
                                        "uuid": data['uuid'],
                                        "data": "device not found",
                                        "type": "operation_reply",
                                        "gathered_at": get_date(),
                                        "identifier": data['identifier']
                                    })
                            else:
                                reply = json.dumps({
                                    "data": "json 'uuid' or 'identifier' field not found",
                                    "type": "operation_reply",
                                    "gathered_at": get_date()
                                })

                        elif subscription_type == "scheduling":
                            if 'schedules' in data:
                                schedules = data['schedules']
                                for scheduling in schedules:
                                    if 'type' in scheduling and 'identifier' in scheduling:
                                        if scheduling['type'] == "create":
                                            if 'timestamp' in scheduling and 'uuid' in scheduling:
                                                must_be_stored = True  # enable persistency
                                                reply = self.process_scheduling_create(
                                                    scheduling,
                                                    configs['devices'],
                                                    must_be_stored)
                                            else:
                                                reply = json.dumps({
                                                    "data": "json 'timestamp' or 'uuid' field not found",
                                                    "type": "scheduling_reply",
                                                    "gathered_at": get_date(),
                                                    "identifier": scheduling['identifier']
                                                })
                                        elif scheduling['type'] == "read":
                                            reply = self.process_scheduling_read(
                                                scheduling)
                                        elif scheduling['type'] == "update":
                                            if 'data' in scheduling:
                                                reply = self.process_scheduling_update(
                                                    scheduling)
                                            else:
                                                reply = json.dumps({
                                                    "data": "json 'data' field not found",
                                                    "type": "scheduling_reply",
                                                    "gathered_at": get_date(),
                                                    "identifier": scheduling['identifier']
                                                })
                                        elif scheduling['type'] == "delete":
                                            reply = self.process_scheduling_delete(
                                                scheduling)
                                        else:
                                            reply = json.dumps({
                                                "data": "action: " + scheduling['type'] + " not recognized",
                                                "type": "scheduling_reply",
                                                "gathered_at": get_date(),
                                                "identifier": scheduling['identifier']
                                            })
                                    else:
                                        reply = json.dumps({
                                            "data": "json 'type' or 'identifier' field not found",
                                            "type": "scheduling_reply",
                                            "gathered_at": get_date()
                                        })
                            else:
                                reply = json.dumps({
                                    "data": "json 'schedules' field not found",
                                    "type": "scheduling_reply",
                                    "gathered_at": get_date()
                                })
                        else:
                            reply = json.dumps({
                                "data": "subscription type error",
                                "type": subscription_type + "_reply",
                                "gathered_at": get_date()
                            })
                    else:
                        reply = json.dumps({
                            "data": "json 'type' field not found",
                            "type": "reply",
                            "gathered_at": get_date()
                        })

                    if reply:
                        self.publish_stack.insert(reply)

                time.sleep(0.5)
            except Exception as e:
                log("Scheduler: {}".format(e))
                self.subscribe_stack.delete()
Example #15
from utils import get_configs

print("******** Running pre-checks ********")
get_configs()
Example #16
def train(config_name, gene_variant=None):
    # Prepare tokenizer, dataset, and model
    configs = get_configs(config_name, verbose=False)
    if configs['use_gene_features']:
        assert gene_variant is not None
        configs['gene_variant'] = gene_variant
    tokenizer = BertTokenizer.from_pretrained(configs['transformer'], do_basic_tokenize=False)
    train_set, dev_set, test_set = load_oneie_dataset(configs['base_dataset_path'], tokenizer)
    model = BasicCorefModel(configs)

    # Initialize the optimizer
    num_train_docs = len(train_set)
    epoch_steps = int(math.ceil(num_train_docs / configs['batch_size']))
    num_train_steps = int(epoch_steps * configs['epochs'])
    num_warmup_steps = int(num_train_steps * 0.1)
    optimizer = model.get_optimizer(num_warmup_steps, num_train_steps)
    print('Initialized optimizer')

    # Main training loop
    best_dev_score, iters, batch_loss = 0.0, 0, 0
    for epoch in range(configs['epochs']):
        print('\n')
        progress = tqdm.tqdm(total=epoch_steps, ncols=80,
                             desc='Train {}'.format(epoch))
        accumulated_loss = RunningAverage()

        train_indices = list(range(num_train_docs))
        random.shuffle(train_indices)
        for train_idx in train_indices:
            iters += 1
            inst = train_set[train_idx]
            iter_loss = model(inst, is_training=True)[0]
            iter_loss /= configs['batch_size']
            iter_loss.backward()
            batch_loss += iter_loss.data.item()
            if iters % configs['batch_size'] == 0:
                accumulated_loss.update(batch_loss)
                torch.nn.utils.clip_grad_norm_(model.parameters(), configs['max_grad_norm'])
                optimizer.step()
                optimizer.zero_grad()
                batch_loss = 0
                # Update progress bar
                progress.update(1)
                progress.set_postfix_str('Average Train Loss: {}'.format(accumulated_loss()))
        progress.close()

        # Evaluation after each epoch
        print('Evaluation on the dev set', flush=True)
        dev_score = evaluate(model, dev_set, configs)['avg']

        # Save model if it has better dev score
        if dev_score > best_dev_score:
            best_dev_score = dev_score
            # Evaluation on the test set
            print('Evaluation on the test set', flush=True)
            evaluate(model, test_set, configs)
            # Save the model
            save_path = os.path.join(configs['saved_path'], 'model.pt')
            torch.save({'model_state_dict': model.state_dict()}, save_path)
            print('Saved the model', flush=True)
Example #17
import os
import sys
from subprocess import call as spc

#------------------------------------------------------------------------------
### MODULES (CUSTOM) ###
#------------------------------------------------------------------------------

this_path = os.path.join(os.path.dirname(__file__), '../EPCN')
sys.path.append(this_path)
import utils

#------------------------------------------------------------------------------
### CONFIGURATIONS (LOCAL) ###
#------------------------------------------------------------------------------

configs = utils.get_configs()
base_dir = configs['raw_data_write_paths']['access']
base_log_path = configs['DEFAULT']['log_path']

#------------------------------------------------------------------------------
### CONFIGURATIONS (REMOTE) ###
#------------------------------------------------------------------------------

retrieval_path = 'http://opendap.bom.gov.au:8080/thredds/{}/bmrc/access-r-fc/ops/surface/'

#------------------------------------------------------------------------------
### FUNCTIONS ###
#------------------------------------------------------------------------------


Example #18
logging.basicConfig(
    level=logging.DEBUG,
    format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
    datefmt='%a, %d %b %Y %H:%M:%S',
    filename=time.strftime("%Y-%m-%d", time.localtime()) + '_crawler.log',
    filemode='w')
urllib3_logger = logging.getLogger('requests')
urllib3_logger.setLevel(logging.WARNING)

REQUEST_PER_TIME = 10

CUR_DIR = os.path.dirname(os.path.abspath(__file__))
CUR_FNAME_SETTINGS = os.path.join(CUR_DIR, 'data', 'settings.yaml')
CUR_CONFIGS = get_configs(CUR_FNAME_SETTINGS)
CUR_HEADERS_BASE = CUR_CONFIGS['HEADERS']['BASE']
CUR_TIMEOUT_QUERY = CUR_CONFIGS['TIMEOUT']['QUERY']
COOKIES = os.path.join(CUR_DIR, 'data', 'cookies.data')
COOKIES_RAW = os.path.join(CUR_DIR, 'data', 'cookies_raw.data')

_XSRF_GLOBAL_FILENAME = os.path.join(CUR_DIR, 'data', '_xsrf.dat')

DATA_STORAGE = 'DateStorage'
if not os.path.exists(DATA_STORAGE):
    os.mkdir(DATA_STORAGE)

DEBUG = False

NOW = time.strftime("%Y-%m-%d", time.localtime())
Example #19
        pret = torch.cat(pret[::-1], dim=1).reshape(-1, self.depth + 1,
                                                    self.rdim)

        # calculate lambda-preturns
        g_lam_ret = val[:, -1, :]
        for i in range(self.depth - 1, -1, -1):
            g_lam_ret = (1 - lam[:, i, :]) * val[:, i, :] + lam[:, i, :] * (
                rew[:, i + 1, :] + gam[:, i + 1, :] * g_lam_ret)

        return pret, g_lam_ret


def main(env, args):
    model = Predictron((1, args.maze_size, args.maze_size),
                       core_depth=args.core_depth)
    t_start = time.time()
    for i in range(args.max_steps):
        mx, my = env.generate_labelled_mazes(args.batch_size)
        loss, lossp, lossl = model(mx, my)
        if i % 100 == 0:
            print(f'Ep: {i}\t | T: {time.time() - t_start:6.0f} |' +
                  f'L: {loss:.4f} | Lp: {lossp:.4f} | Ll: {lossl:.4f}|')


if __name__ == '__main__':
    from maze import MazeEnv
    from utils import get_configs
    args = get_configs()
    env = MazeEnv(args.maze_size, args.maze_size, args.maze_density)
    main(env, args)
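The backward loop in the snippet above implements the Predictron lambda-preturn recursion, evaluated from k = K-1 down to 0 (here K = self.depth):

    g_K = v_K
    g_k = (1 - lam_k) * v_k + lam_k * (r_{k+1} + gamma_{k+1} * g_{k+1})

so each g_lam_ret blends the k-step value estimate with the bootstrapped longer-horizon return.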
Example #20
def get_configs():
    dir_root = os.path.dirname(os.path.abspath(__file__))
    fname_settings = os.path.join(dir_root, 'zhihu_configs.yaml')
    return utils.get_configs(fname_settings)
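The examples come from different projects, so the underlying get_configs helper differs between them. As one minimal sketch matching the YAML-based variants (Examples #3, #18, #20), assuming the settings live in a YAML file and PyYAML is available (the real helpers may use another format or add validation):

import os

import yaml  # PyYAML; an assumption about the parser


def get_configs(fname_settings=None):
    """Load settings from a YAML file and return them as a dict."""
    if fname_settings is None:
        # Hypothetical default: a settings.yaml next to this module.
        dir_root = os.path.dirname(os.path.abspath(__file__))
        fname_settings = os.path.join(dir_root, 'settings.yaml')
    with open(fname_settings) as f:
        return yaml.safe_load(f)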