Example #1
def load_config_data(env):

    c = {
        'env': env
    }

    config_loc = '{home}/config/{env}.yaml'.format(home=home, env=env)
    private_loc = '{home}/config/private.yaml'.format(home=home)

    c.update(load_yaml(config_loc) or {})
    c.update(load_yaml(private_loc) or {})

    return c, config_loc, private_loc
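Note: each example on this page calls a project-specific load_yaml helper whose definition is not shown, and its signature varies between projects (some calls pass a path, some a file object, some extra options, some no argument at all). A minimal sketch for the common path-or-file case, assuming PyYAML, might look like:

import yaml

def load_yaml(path_or_file):
    # Accept either a filesystem path or an already-open file object,
    # since the examples below pass both kinds of argument.
    if hasattr(path_or_file, 'read'):
        return yaml.safe_load(path_or_file)
    with open(path_or_file) as f:
        return yaml.safe_load(f)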
Example #2
def makeService(args):
    '''
    Create and launch main service
    '''
    mainService = service.MultiService()
    config = load_yaml(args['datadir'])
    services = get_subservices()
    mainService.config = config
    for sub_service in services:
        mainService.services.append(sub_service)
        sub_service.parent = mainService
        if hasattr(sub_service, 'register_art_url'):
            mainService.register_art_url = sub_service.register_art_url

    return mainService
Example #3
    def directory_age(self, directory):
        """
        Returns the age in seconds of the backup of the given directory on the
        device.
        """
        # Find the date this copy of the directory was made. Read each log
        # entry, newest first, until one is found which records a copy
        # having been made. If the copy is verified, `dir_date` is the date it
        # was made. If the copy is not verified, `dir_date` stays None because
        # the copy is faulty.
        try:
            device_log = load_yaml(self.log_path)
        except Exception:
            return None
        dir_date = None
        for entry in reversed(device_log):
            try:
                if entry['directories'][directory['name']]['copied']:
                    try:
                        if entry['directories'][directory['name']]['verified']:
                            dir_date = entry['date']
                        else:
                            dir_date = None
                        break
                    except KeyError:
                        break
            except KeyError:
                pass

        # Calculate the time in seconds since the copy was made.
        if dir_date:
            elapsed_time = datetime.now() - \
                datetime.strptime(dir_date, '%d-%b-%Y %H:%M:%S')
            return elapsed_time.total_seconds()
        else:
            return None
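For reference, the nested lookups above imply that each entry of the device log has roughly the following shape once parsed by load_yaml (a hypothetical sketch; only the key names and the date format are taken from the code):

# One device-log entry as directory_age() expects it; values are made up.
example_entry = {
    'date': '01-Jan-2024 12:00:00',      # must match '%d-%b-%Y %H:%M:%S'
    'directories': {
        'Music': {                       # keyed by directory['name']
            'copied': True,
            'verified': True,
        },
    },
}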
Example #4
	def __init__(self, url, driver, yaml_path, delay=40):
		self.url = url
		self.driver = self.select_driver(driver)
		self.delay = delay
		self.wait = WebDriverWait(self.driver, self.delay)
		self.scenario = load_yaml(yaml_path)
		self.start()
Example #5
def makeService(args):
    '''
    Create and launch main service
    '''
#     imports = '''
# from onDemand.%s import %s
# player = %s('%s', %d)
# '''
#     endpoints = {}
#     clients = {}
    mainService = service.MultiService()
#     load_config(args['datadir'])
    load_yaml(args['datadir'])
    services = get_subservices()
    for sub_service in services:
        mainService.services.append(sub_service)
        sub_service.parent = mainService
        if hasattr(sub_service, 'register_art_url'):
            mainService.register_art_url = sub_service.register_art_url

    return mainService
Example #6
def load_mongodb_conf():
    """
    Load MongoDB configurations
    :return:
    """
    cached_conf = load_mongodb_conf.conf

    if not cached_conf:
        conf_all = load_yaml()
        tmp = conf_all['mongodb'] if 'mongodb' in conf_all else []
        cached_conf = dict((item['profile'], item) for item in tmp)
        load_mongodb_conf.conf = cached_conf

    return cached_conf
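The function above caches its result on a function attribute, so load_mongodb_conf.conf must already exist the first time the function runs; presumably the module initializes it somewhere like this (not shown in the example):

load_mongodb_conf.conf = None  # assumed module-level initialization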
Example #7
	def _load_configuration(self):
		"""
		Loads the configuration from the configuration file.
		"""

		config = utils.load_yaml(self.CONFIG_FILE_PATH)
		self._incoming_port = config['physical_ports']['incoming']
		self._outgoing_port = config['physical_ports']['outgoing']
		self._mode = Mode(config['mode'])
		self._flow_active_time_secs = config['flow_active_time_secs']
		self._time_to_keep_stats_secs = config['time_to_keep_stats_secs']
		self._flow_starts_retrieval_interval_secs = config['flow_starts_retrieval_interval_secs']
		self._firewall_dpid = config['firewall_dpid']
		self._blacklist_rules = [Rule(**rule_dict) for rule_dict in config['blacklist_rules']]
		self._whitelist_rules = [Rule(**rule_dict) for rule_dict in config['whitelist_rules']]
Example #8
    def parse(self, filepath):
        args = utils.load_yaml(filepath)
        print args
        task_name = args.keys()[0]
        self.data_dir = args[task_name]['data_dir']

        if not os.path.exists(self.data_dir):
            os.makedirs(self.data_dir)
        src_db = args[task_name]['src_db']
        dst_db = args[task_name]['dst_db']
        self.src_db = PgsqlDBProxy(src_db['HOST'], src_db['PORT'],
                                   src_db['USER'], src_db['PASSWORD'],
                                   src_db['DB_NAME'])
        self.dst_db = PgsqlDBProxy(dst_db['HOST'], dst_db['PORT'],
                                   dst_db['USER'], dst_db['PASSWORD'],
                                   dst_db['DB_NAME'])
        self.pairs = args[task_name]['pairs']
        self.dst_table_list = [pair['dst_table'] for pair in self.pairs]
Example #9
def main(screen):
    """
    Draws and redraws the screen.
    """
    # Hide the cursor.
    curses.curs_set(0)

    # Load config from file.
    config = load_yaml(os.path.expanduser('~/.suave/config.yml'))

    # Create boxes from config.
    boxes = []
    for box in config:
        boxes.append(
            Box(
                screen=screen,
                rows=box['rows'],
                columns=box['columns'],
                rows_offset=box['rows-offset'],
                columns_offset=box['columns-offset'],
                command=box['command'],
                interval=box['interval'],

            )
        )

    while True:
        # Redraw the screen only when it changes.
        if screen.is_wintouched():
            screen.clear()
            screen.refresh()

        # Give every box an opportunity to redraw if it has changed.
        [box.redraw_if_changed() for box in boxes]

        # Wait before redrawing again.
        curses.napms(1000)
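The ~/.suave/config.yml file read above is evidently a list of box definitions; the keys accessed in the loop suggest entries shaped roughly like this (hypothetical values, key names taken from the code):

# One box definition as main() expects it after load_yaml.
example_box = {
    'rows': 10,
    'columns': 40,
    'rows-offset': 0,
    'columns-offset': 0,
    'command': 'uptime',
    'interval': 5,
}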
Example #10
def get_mysql_db(db_name, user=None, passwd=None, profile=None, host='localhost', port=3306):
    """
    Establish a MySQL connection
    :param db_name:
    :param user:
    :param passwd:
    :param profile:
    :param host:
    :param port:
    :return:
    """

    cached = getattr(get_mysql_db, 'cached', {})
    sig = '%s|%s|%s|%s|%s|%s' % (db_name, profile, host, port, user, passwd)
    if sig in cached:
        return cached[sig]

    cfg = load_yaml()
    if profile and profile in cfg:
        section = cfg[profile]
        host = section.get('host', 'localhost')
        port = int(section.get('port', '3306'))
        user = section.get('user', None)
        passwd = section.get('passwd', None)
Example #11
#   Primary Author: Mayank Mohindra <*****@*****.**>
#
#   Purpose: db.sqlite3 initiate

from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from werkzeug.security import generate_password_hash
import uuid

from utils import load_yaml

config = load_yaml('config.yaml')

app = Flask(__name__)
app.config['SECRET_KEY'] = config.get('SECRET_KEY')
app.config['SQLALCHEMY_DATABASE_URI'] = config.get('DATABASE_URI')
db = SQLAlchemy(app)


class User(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    public_id = db.Column(db.String(50), unique=True)
    username = db.Column(db.String(50), unique=True)
    password = db.Column(db.String(80))
    admin = db.Column(db.Boolean)


db.create_all()

username = config.get('ADMIN_USERNAME')
password = config.get('ADMIN_PASSWORD')
Example #12
import os
import torch
import numpy as np
import sklearn
import tqdm

import utils
import model
import loss_function

# load yaml
param = utils.load_yaml()


def evalute_test_machine(model_name, target_dir, pool_type='mean'):
    global csv_lines
    # hyperparameter
    n_mels = param['n_mels']
    frames = param['frames']
    result_dir = param['result_dir']

    machine_type = os.path.split(target_dir)[1]
    # result csv
    csv_lines.append([machine_type])
    csv_lines.append(['id', 'AUC', 'pAUC'])
    performance = []
    # load model
    model_dir = param['model_dir']
    model_path = f'{model_dir}/{model_name}/{machine_type}/model_{machine_type}.pkl'
    if model_name == 'AE':
        ae_net = model.Auto_encoder(input_dim=frames * n_mels,
Example #13
            print("ce info error")
        print("kpis\teach_step_duration_card%s\t%s" % (card_num, ce_time))
        print("kpis\ttrain_cost_card%s\t%f" % (card_num, ce_cost))
        print("kpis\ttrain_precision_card%s\t%f" % (card_num, ce_p))
        print("kpis\ttrain_recall_card%s\t%f" % (card_num, ce_r))
        print("kpis\ttrain_f1_card%s\t%f" % (card_num, ce_f1))


def get_cards():
    num = 0
    cards = os.environ.get('CUDA_VISIBLE_DEVICES', '')
    if cards != '':
        num = len(cards.split(","))
    return num


if __name__ == "__main__":
    # Argument handling can use argparse, yaml, or json as needed.
    # For NLP tasks, the configure utility defined under PALM is recommended,
    # since it unifies argparse, yaml, and json style config files.

    parser = argparse.ArgumentParser(__doc__)
    utils.load_yaml(parser, 'conf/args.yaml')

    args = parser.parse_args()
    check_cuda(args.use_cuda)
    check_version()

    print(args)

    do_train(args)
Example #14
from utils import load_yaml, dump_obj
import sys

for fn in sys.argv[1:]:
    data = load_yaml(open(fn))
    data.pop("contact_details")
    dump_obj(data, filename=fn)
Example #15
def main(args):
    paddle.seed(12345)
    config = load_yaml(args.config_yaml)
    use_gpu = config.get("dygraph.use_gpu", False)
    test_data_dir = config.get("dygraph.test_data_dir", None)
    epochs = config.get("dygraph.epochs", None)
    print_interval = config.get("dygraph.print_interval", None)
    model_load_path = config.get("dygraph.infer_load_path", "inference")
    start_epoch = config.get("dygraph.infer_start_epoch", -1)
    end_epoch = config.get("dygraph.infer_end_epoch", 1)
    sentence_left_size = config.get("hyper_parameters.sentence_left_size")
    sentence_right_size = config.get("hyper_parameters.sentence_right_size")

    print("***********************************")
    logger.info(
        "use_gpu: {}, test_data_dir: {}, epochs: {}, print_interval: {}, model_load_path: {}"
        .format(use_gpu, test_data_dir, epochs, print_interval,
                model_load_path))
    print("***********************************")

    place = paddle.set_device('gpu' if use_gpu else 'cpu')

    pyramid_model = create_model(config)
    # to do init model
    file_list = [
        os.path.join(test_data_dir, x) for x in os.listdir(test_data_dir)
    ]
    print("read data")
    dataset = LetorDataset(file_list)
    test_dataloader = create_data_loader(dataset, place=place, config=config)

    epoch_begin = time.time()
    interval_begin = time.time()

    for epoch_id in range(start_epoch + 1, end_epoch):

        logger.info("load model epoch {}".format(epoch_id))
        model_path = os.path.join(model_load_path, str(epoch_id))
        load_model(model_path, pyramid_model)

        for batch_id, batch in enumerate(test_dataloader()):
            batch_size = config.get("dygraph.batch_size", 128)

            inputs = create_feeds(batch, sentence_left_size,
                                  sentence_right_size)

            prediction = pyramid_model(inputs)

            if batch_id % print_interval == 0:
                logger.info(
                    "infer epoch: {}, batch_id: {}, prediction: {}, speed: {:.2f} ins/s"
                    .format(
                        epoch_id, batch_id, prediction.numpy(),
                        print_interval * batch_size /
                        (time.time() - interval_begin)))
                interval_begin = time.time()

        logger.info(
            "infer epoch: {} done, prediction: {}, : epoch time{:.2f} s".
            format(epoch_id, prediction.numpy(),
                   time.time() - epoch_begin))
Example #16
import os

from utils import load_yaml

BASE_PATH = os.path.expanduser('~/.equanimity')

config = load_yaml(os.path.join(BASE_PATH, 'config.yml'))

COMPUTER_NAME = config['computer_name']

MEDIA_PATH = config['media_path']

DIRECTORIES = load_yaml(os.path.join(BASE_PATH, 'directories.yml'))

DEVICES = load_yaml(os.path.join(BASE_PATH, 'devices.yml'))

LOG_DIR = os.path.join(BASE_PATH, 'logs')

# Determine which backup device is present.
DEVICE = None
for d in DEVICES:
    if os.path.exists(d['path']):
        DEVICE = d
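The devices.yml loaded above appears to be a list of device records, each with at least a 'path' key used to detect which backup device is mounted (a hypothetical sketch):

# Hypothetical shape of DEVICES; only 'path' is read in the loop above.
DEVICES_EXAMPLE = [
    {'path': '/Volumes/backup-1'},
    {'path': '/Volumes/backup-2'},
]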
Example #17
    def from_yaml(cls, filename):
        with open(filename) as f:
            data = load_yaml(f)
        return cls(filename, data)
Example #18
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-c', '--config', type=str, default='./config.yml',
        help='Specify training config file.'
    )
    args = parser.parse_args()

    # Setup directory that saves the experiment results.
    dirname: str = datetime.now().strftime('%Y%m%d_%H-%M-%S')
    save_dir: str = os.path.join('../experiments', dirname)
    os.makedirs(save_dir, exist_ok=False)
    weights_dir: str = os.path.join(save_dir, 'weights')
    os.makedirs(weights_dir, exist_ok=False)

    cfg_dict: Dict[str, Any] = utils.load_yaml(args.config)
    cfg: utils.DotDict = utils.DotDict(cfg_dict)
    logger.info(f'Training configurations: {cfg}')

    device: str = 'cuda' if torch.cuda.is_available() else 'cpu'

    model = models.utils.load_model(
        num_classes=cfg.num_classes,
        architecture=cfg.model.architecture,
        backbone=cfg.model.backbone,
        pretrained=True
    )
    model = model.to(device)

    criterion = cfg_tools.load_loss(cfg.loss.name, **cfg.loss.params)
Example #19
File: app.py Project: lpmi-13/hedy
def load_adventures_in_all_languages():
    adventures = {}
    for lang in ALL_LANGUAGES.keys():
        adventures[lang] = load_yaml(f'coursedata/adventures/{lang}.yaml')
    return adventures
Example #20
                representing a single batch.
        """
        encoded = self.encode(inputs)
        return self.linear(encoded).squeeze(1)

    def predict(self, inputs, score=False):
        """
            Predict the class membership for each instance in the provided 
            inputs
            Args:
                inputs - list(list(int)) - List of vocab-encoded text sequences
        """
        val = torch.sigmoid(self.forward(inputs))
        if score:
            return val
        return torch.round(val)


if __name__ == '__main__':

    config = load_yaml('./config.yaml')
    model = ZhangCNNLSTMModel(config)
    num_examples = 10
    batch_count = 0
    for examples, labels in generate_rand_data(config['vocab_size'],
                                               config['max_len'],
                                               num_examples):
        preds = model(examples)
        batch_count += 1
    assert (batch_count == num_examples)
Example #21
def main(args):
    paddle.seed(12345)
    config = load_yaml(args.config_yaml)
    use_gpu = config.get("dygraph.use_gpu", True)
    test_data_dir = config.get("dygraph.test_data_dir", None)
    feature_size = config.get('hyper_parameters.feature_size', None)
    print_interval = config.get("dygraph.print_interval", None)
    model_load_path = config.get("dygraph.infer_load_path", "model_output")
    start_epoch = config.get("dygraph.infer_start_epoch", -1)
    end_epoch = config.get("dygraph.infer_end_epoch", 10)

    place = paddle.set_device('gpu' if use_gpu else 'cpu')

    print("***********************************")
    logger.info(
        "use_gpu: {}, test_data_dir: {}, start_epoch: {}, end_epoch: {}, print_interval: {}, model_load_path: {}"
        .format(use_gpu, test_data_dir, start_epoch, end_epoch, print_interval,
                model_load_path))
    print("***********************************")

    textcnn_model = create_model(config)
    file_list = [
        os.path.join(test_data_dir, x) for x in os.listdir(test_data_dir)
    ]
    print("read data")
    dataset = TextCNNDataset(file_list)
    test_dataloader = create_data_loader(dataset, place=place, config=config)

    acc_metric = paddle.metric.Accuracy()
    epoch_begin = time.time()
    interval_begin = time.time()

    for epoch_id in range(start_epoch + 1, end_epoch):

        logger.info("load model epoch {}".format(epoch_id))
        model_path = os.path.join(model_load_path, str(epoch_id))
        load_model(model_path, textcnn_model)
        for batch_id, batch in enumerate(test_dataloader()):
            batch_size = len(batch[0])

            input_data, label = create_feeds(batch)

            pred = textcnn_model.forward(input_data)

            # for acc
            prediction = paddle.nn.functional.softmax(pred)
            correct = acc_metric.compute(prediction, label)
            acc_metric.update(correct)

            if batch_id % print_interval == 1:
                logger.info(
                    "infer epoch: {}, batch_id: {}, acc: {:.6f}, speed: {:.2f} ins/s"
                    .format(
                        epoch_id, batch_id, acc_metric.accumulate(),
                        print_interval * batch_size /
                        (time.time() - interval_begin)))
                interval_begin = time.time()

        logger.info(
            "infer epoch: {} done, acc: {:.6f}, epoch time: {:.2f} s".format(
                epoch_id, acc_metric.accumulate(),
                time.time() - epoch_begin))
Example #22
    def __init__(self, first_names, last_names, data_file):
        super(NameDetector, self).__init__('Detect personal names')
        self.first_names = WLTStorage(first_names)
        self.last_names = WLTStorage(last_names)
        self.data = load_yaml(data_file)
        self.majka = Majka()
Example #23
                    help="model path",
                    type=str,
                    required=True)
args = parser.parse_args()

# SAMPLE_RATE = 32000
# IMAGE_SIZE = 224

# submission.csv will be overwritten if everything goes well
submission = pd.read_csv(utils.DATA_DIR / "sample_submission.csv")
submission["birds"] = "a"
submission.to_csv("submission.csv", index=False)

model_dir = Path(args.model_dir)
config_path = model_dir / ".hydra" / "config.yaml"
config = utils.load_yaml(config_path)

# model_config_list = []
# if args.cv:
#     print("predict cv model")
#     FOLD = 5
#     if args.debug:
#         FOLD = 1
#     for i in range(FOLD):
#         model_config = {
#             "path": model_dir / f"best_model_fold{i}.pth",
#             "model_name": config["model"]["name"],
#             "n_class": len(utils.BIRD_CODE),
#             "in_chans": config["model"]["in_chans"],
#         }
#         model_config_list.append(model_config)
Example #24
#!/usr/bin/env python3

"""
Note that this is a one-off script to pull in the lexemes.yaml data from
morphological-lexicon and store it here.

Once the reduced lexemes.yaml is in this repo, this script exists only for
historical interest and reproducibility.
"""

from utils import load_yaml, sorted_items

for key, value in sorted_items(load_yaml("../../morphgnt/morphological-lexicon/lexemes.yaml")):
    print("{}:".format(key))
    headword = value.get("full-citation-form", value.get("danker-entry", key))
    gloss = value.get("gloss")
    print("    headword: {}".format(headword))
    if gloss:
        print("    gloss: {}".format(gloss))
Example #25
def main(args=None):
    args = utils.parse_args(create_parser(), args)
    if args.logging_config is not None:
        logging.config.dictConfig(utils.load_yaml(args.logging_config))
    save_dir = pathlib.Path(args.save_dir)
    if (not args.overwrite and save_dir.exists()
            and utils.has_element(save_dir.glob("*.json"))):
        raise FileExistsError(f"save directory ({save_dir}) is not empty")
    shell = utils.ShellUtils()
    shell.mkdir(save_dir, silent=True)
    logger = logging.getLogger("interpolate")
    data_dir = pathlib.Path(args.data_dir)
    data = {
        split: list(
            map(Dialog.from_json,
                utils.load_json(data_dir.joinpath(f"{split}.json"))))
        for split in set(args.splits)
    }
    processor: DialogProcessor = utils.load_pickle(args.processor_path)
    logger.info("preparing model...")
    torchmodels.register_packages(models)
    model_cls = torchmodels.create_model_cls(models, args.model_path)
    model: models.AbstractTDA = model_cls(processor.vocabs)
    model.reset_parameters()
    model.load_state_dict(torch.load(args.ckpt_path))
    device = torch.device("cpu")
    if args.gpu is not None:
        device = torch.device(f"cuda:{args.gpu}")
    model = model.to(device)
    samples = (sample_data(data,
                           args.anchor1), sample_data(data, args.anchor2))
    formatter = utils.DialogTableFormatter()
    logger.info(f"first sample: \n{formatter.format(samples[0])}")
    logger.info(f"second sample: \n{formatter.format(samples[1])}")
    logger.info("preparing environment...")
    dataloader = datasets.create_dataloader(dataset=datasets.DialogDataset(
        data=samples, processor=processor),
                                            batch_size=1,
                                            shuffle=False,
                                            pin_memory=False)
    inferencer = InterpolateInferencer(model=model,
                                       processor=processor,
                                       device=device)
    logger.info("interpolating...")
    with torch.no_grad():
        zconv_a, zconv_b = inferencer.encode(dataloader)
        zconv = torch.stack([
            zconv_a + (zconv_b - zconv_a) / args.steps * i
            for i in range(args.steps + 1)
        ])
        gen_samples = inferencer.generate(td.DataLoader(zconv, shuffle=False))
    # use original data points for two extremes
    samples = [samples[0]] + list(gen_samples[1:-1]) + [samples[1]]
    logger.info("interpolation results: ")
    for i, sample in enumerate(samples):
        logger.info(f"interpolation step {i / args.steps:.2%}: \n"
                    f"{formatter.format(sample)}")
    logger.info("saving results...")
    json_dir = save_dir.joinpath("json")
    json_dir.mkdir(exist_ok=True)
    for i, sample in enumerate(samples, 1):
        utils.save_json(sample.to_json(), json_dir.joinpath(f"{i:02d}.json"))
    tbl_dir = save_dir.joinpath("table")
    tbl_dir.mkdir(exist_ok=True)
    for i, sample in enumerate(samples, 1):
        utils.save_lines([formatter.format(sample)],
                         tbl_dir.joinpath(f"{i:02d}.txt"))
    ltx_dir = save_dir.joinpath("latex")
    ltx_dir.mkdir(exist_ok=True)
    ltx_formatter = utils.DialogICMLLatexFormatter()
    for i, sample in enumerate(samples, 1):
        utils.save_lines([ltx_formatter.format(sample)],
                         ltx_dir.joinpath(f"{i:02d}.tex"))
    logger.info("done!")
Example #26
def write_csv(files, jurisdiction_id, output_filename):
    with open(output_filename, "w") as outf:
        out = csv.DictWriter(
            outf,
            (
                "id",
                "name",
                "current_party",
                "current_district",
                "current_chamber",
                "given_name",
                "family_name",
                "gender",
                "biography",
                "birth_date",
                "death_date",
                "image",
                "links",
                "sources",
                "capitol_address",
                "capitol_email",
                "capitol_voice",
                "capitol_fax",
                "district_address",
                "district_email",
                "district_voice",
                "district_fax",
                "twitter",
                "youtube",
                "instagram",
                "facebook",
            ),
        )
        out.writeheader()

        for filename in files:
            with open(filename) as f:
                data = load_yaml(f)

                # current party
                for role in data["party"]:
                    if role_is_active(role):
                        current_party = role["name"]
                        break

                # current district
                for role in data["roles"]:
                    if role_is_active(role):
                        current_chamber = role["type"]
                        current_district = role["district"]

                district_address = district_email = district_voice = district_fax = None
                capitol_address = capitol_email = capitol_voice = capitol_fax = None
                for cd in data.get("contact_details", {}):
                    note = cd["note"].lower()
                    if "district" in note:
                        district_address = cd.get("address")
                        district_email = cd.get("email")
                        district_voice = cd.get("voice")
                        district_fax = cd.get("fax")
                    elif "capitol" in note:
                        capitol_address = cd.get("address")
                        capitol_email = cd.get("email")
                        capitol_voice = cd.get("voice")
                        capitol_fax = cd.get("fax")
                    else:
                        click.secho("unknown office: " + note, fg="red")

                links = ";".join(l["url"] for l in data.get("links", []))
                sources = ";".join(l["url"] for l in data.get("sources", []))

                obj = {
                    "id": data["id"],
                    "name": data["name"],
                    "current_party": current_party,
                    "current_district": current_district,
                    "current_chamber": current_chamber,
                    "given_name": data.get("given_name"),
                    "family_name": data.get("family_name"),
                    "gender": data.get("gender"),
                    "biography": data.get("biography"),
                    "birth_date": data.get("birth_date"),
                    "death_date": data.get("death_date"),
                    "image": data.get("image"),
                    "twitter": data.get("ids", {}).get("twitter"),
                    "youtube": data.get("ids", {}).get("youtube"),
                    "instagram": data.get("ids", {}).get("instagram"),
                    "facebook": data.get("ids", {}).get("facebook"),
                    "links": links,
                    "sources": sources,
                    "district_address": district_address,
                    "district_email": district_email,
                    "district_voice": district_voice,
                    "district_fax": district_fax,
                    "capitol_address": capitol_address,
                    "capitol_email": capitol_email,
                    "capitol_voice": capitol_voice,
                    "capitol_fax": capitol_fax,
                }
                out.writerow(obj)

    click.secho(f"processed {len(files)} files", fg="green")
Example #27
    weights_dir: str = os.path.join(save_dir, 'weights')
    os.makedirs(weights_dir, exist_ok=False)
    return save_dir


if __name__ == '__main__':
    utils.seed_everything(seed=428)
    sns.set()
    save_dir: str = create_experiment_directories()

    with open('logger_conf.yaml', 'r') as f:
        log_config: Dict[str, Any] = yaml.safe_load(f.read())
        logging.config.dictConfig(log_config)

    logger = getLogger(__name__)
    cfg_dict: Dict[str, Any] = utils.load_yaml('config.yml')
    cfg: utils.DotDict = utils.DotDict(cfg_dict)

    dtrain = InstanceSegmentationDataset(
        albu.core.serialization.from_dict(cfg.albumentations.train.todict()))
    dvalid = InstanceSegmentationDataset(
        albu.core.serialization.from_dict(cfg.albumentations.eval.todict()))

    indices = list(range(len(dtrain)))
    dtrain = torch.utils.data.Subset(dtrain, indices[:-50])  # type: ignore
    dvalid = torch.utils.data.Subset(dvalid, indices[-50:])  # type: ignore

    train_loader = torch.utils.data.DataLoader(dtrain,
                                               batch_size=cfg.batch_size,
                                               shuffle=True,
                                               collate_fn=collate_fn,
Example #28
try:
    exp_name = utils.make_experiment_name(args.debug)
    result_dir = utils.RESULTS_BASE_DIR / exp_name
    os.mkdir(result_dir)

    logger = mylogger.get_mylogger(filename=result_dir / 'log')
    sandesh.send(f'start: {exp_name}')
    logger.debug(f'created: {result_dir}')
    logger.debug('loading data ...')

    train_feat_path = utils.FEATURE_DIR / 'baseline_features.pkl'
    X = utils.load_pickle(train_feat_path)
    print(X.columns)

    features_list = utils.load_yaml(args.feature)
    utils.dump_yaml(features_list, result_dir / 'features_list.yml')
    all_features = features_list['features']['original'] + \
        features_list['features']['generated']
    categorical_feat = features_list['categorical_features']

    logger.debug(all_features)
    logger.debug(f'features num: {len(all_features)}')
    utils.dump_yaml(features_list, result_dir / 'features_list.yml')

    # X_test = X_test[all_features]

    # sandesh.send(args.config)
    config = utils.load_yaml(args.config)
    logger.debug(config)
    utils.dump_yaml(config, result_dir / 'model_config.yml')
Example #29
argparser.add_argument(
    "--backend", default="backends.LaTeX",
    help="python class to use for backend (defaults to backends.LaTeX)")


args = argparser.parse_args()

verses = parse_verse_ranges(args.verses)

if args.exclude:
    exclusions = load_wordset(args.exclude)
else:
    exclusions = set()

if args.glosses:
    glosses = load_yaml(args.glosses)
else:
    glosses = None

if args.headwords:
    headwords = load_yaml(args.headwords)
else:
    headwords = {}


def verb_parse(ccat_parse):
    text = ccat_parse[1:4]
    if ccat_parse[3] in "DISO":
        text += " " + ccat_parse[0] + ccat_parse[5]
    elif ccat_parse[3] == "P":
        text += " " + ccat_parse[4:7]
Example #30

#            print('Test Step: {}/{} Loss: {:.4f} \t Acc: {:.4f}'.format(batch_idx,len(dataloader), loss_record(), acc_record()))

    return loss_record(), acc_record()

if __name__ == '__main__':

    experiment_dir = r'D:\2020\Trainings\self_supervised_learning\experiments\sl_exp_1'
    config_file = os.path.join(experiment_dir, 'config.yaml')

    assert os.path.isfile(
        config_file), "No parameters config file found at {}".format(
            config_file)

    cfg = utils.load_yaml(config_file, config_type='object')

    use_cuda = cfg.use_cuda and torch.cuda.is_available()
    cfg.use_cuda = use_cuda
    device = torch.device(
        "cuda:{}".format(cfg.cuda_num) if use_cuda else "cpu")

    ## get the dataloaders
    _, _, dloader_test = dataloaders.get_dataloaders(cfg, val_split=.2)

    # Load the model
    model = models.get_model(cfg)
    model = model.to(device)
    criterion = nn.CrossEntropyLoss()

    test_loss, test_acc = test(model, device, dloader_test, criterion,
Example #31
    def __init__(self):
        self.settings = load_yaml("LaTeX.yaml")
Example #32
def main(args):
    paddle.seed(12345)
    config = load_yaml(args.config_yaml)
    use_gpu = config.get("use_gpu", True)
    test_data_dir = config.get("dygraph.test_data_dir", None)
    feature_size = config.get('hyper_parameters.feature_size', None)
    print_interval = config.get("dygraph.print_interval", None)
    model_load_path = config.get("dygraph.infer_load_path", "model_output")
    start_epoch = config.get("dygraph.infer_start_epoch", -1)
    end_epoch = config.get("dygraph.infer_end_epoch", 10)

    place = paddle.set_device('gpu' if use_gpu else 'cpu')

    mmoe_model = create_model(config)
    file_list = [
        os.path.join(test_data_dir, x) for x in os.listdir(test_data_dir)
    ]
    print("read data")
    dataset = CensusDataset(file_list)
    test_dataloader = create_data_loader(dataset,
                                         mode='test',
                                         place=place,
                                         config=config)

    auc_metric_marital = paddle.metric.Auc("ROC")
    auc_metric_income = paddle.metric.Auc("ROC")
    epoch_begin = time.time()
    interval_begin = time.time()

    for epoch_id in range(start_epoch + 1, end_epoch):

        logger.info("load model epoch {}".format(epoch_id))
        model_path = os.path.join(model_load_path, str(epoch_id))
        load_model(model_path, mmoe_model)
        for batch_id, batch in enumerate(test_dataloader()):
            batch_size = len(batch[0])

            input_data, label_income, label_marital = create_feeds(
                batch, feature_size)

            pred_income, pred_marital = mmoe_model(input_data)

            # for auc
            auc_metric_income.update(preds=pred_income.numpy(),
                                     labels=label_income.numpy())
            auc_metric_marital.update(preds=pred_marital.numpy(),
                                      labels=label_marital.numpy())

            if batch_id % print_interval == 1:
                logger.info(
                    "infer epoch: {}, batch_id: {}, auc_income: {:.6f}, auc_marital: {:.6f}, speed: {:.2f} ins/s"
                    .format(
                        epoch_id, batch_id, auc_metric_income.accumulate(),
                        auc_metric_marital.accumulate(), print_interval *
                        batch_size / (time.time() - interval_begin)))
                interval_begin = time.time()

        logger.info(
            "infer epoch: {} done, auc_income: {:.6f}, auc_marital: {:.6f}, : epoch time{:.2f} s"
            .format(epoch_id, auc_metric_income.accumulate(),
                    auc_metric_marital.accumulate(),
                    time.time() - epoch_begin))
Example #33
def load_config():
    """
    Loads the configuration and parses the command line arguments.

    This function is the "main" function of this module and brings together all
    of the modules various functions.

    After this function executes, :data:`CONFIG` will contain the final
    configuration, and :data:`ARGS` will contain any left over command line
    arguments that weren't parsed (which will likely be the command that the
    user wants to execute).

    :returns: A ``dict`` containing the final configuration.

    """

    global ARGS
    options, ARGS = parse_arguments()
    options = dict(i for i in options.__dict__.items() if i[1] is not None)

    if "verbosity" in options:
        logcontrol.set_level(options["verbosity"])

    logger.debug(
        "Command line options passed in...\n%s",
        pprint.pformat(options)
    )
    logger.debug(
        "Command line arguments passed in...\n%s",
        pprint.pformat(ARGS)
    )

    # Try and find a configuration file
    config_file_path = None
    if options.get("config") is not None:
        config_file_path = options["config"]
    else:
        # Figure out all of the places we should look for a configuration file.
        possible_config_paths = generate_search_path()

        # Ensure any ., .., and ~ symbols are correctly handled.
        possible_config_paths = utils.resolve_paths(possible_config_paths)

        logger.debug(
            "Searching for configuration file in...\n%s",
            pprint.pformat(possible_config_paths, width = 72)
        )

        for i in possible_config_paths:
            if os.path.isfile(i):
                config_file_path = i
                break
    configuration = {}
    if config_file_path is None:
        logger.info("No configuration file found.")
    else:
        logger.info("Loading configuration file at %s.", config_file_path)

        try:
            f = open(config_file_path)
        except IOError:
            logger.critical(
                "Could not open configuration file at %s.",
                config_file_path,
                exc_info = sys.exc_info()
            )
            raise

        try:
            configuration = utils.load_yaml(f)

            if not isinstance(configuration, dict):
                logger.critical(
                    "Your configuration file is not properly formatted. "
                    "The top level item must be a dictionary."
                )
                sys.exit(1)
        except ValueError:
            logger.critical(
                "Could not parse configuration file at %s.",
                config_file_path,
                exc_info = sys.exc_info()
            )
            raise
        finally:
            f.close()

    # Make a dictionary with the default values in it
    default_configuration = dict(
        (i.name, i.default_value) for i in KNOWN_OPTIONS.values()
                if i.default_value is not None
    )

    # Join the various dictionaries we have together. Priority is bottom-to-top.
    final_config = dict(
        default_configuration.items() +
        configuration.items() +
        options.items()
    )

    for i in (j.name for j in KNOWN_OPTIONS.values() if j.required):
        if i not in final_config:
            logger.critical(
                "Required value %s is unspecified. This value needs to be "
                "set in either the configuration file or on the command line.",
                i
            )
            sys.exit(1)

    # Go through and resolve any paths
    for i in (j.name for j in KNOWN_OPTIONS.values() if j.data_type is Path):
        if i in final_config:
            final_config[i] = utils.resolve_path(final_config[i])

    return final_config
Example #34
def main(args):
    paddle.seed(12345)
    config = load_yaml(args.config_yaml)
    use_gpu = config.get("dygraph.use_gpu", True)
    train_data_dir = config.get("dygraph.train_data_dir", None)
    epochs = config.get("dygraph.epochs", None)
    print_interval = config.get("dygraph.print_interval", None)
    model_save_path = config.get("dygraph.model_save_path", "model_output")
    wide_input_dim = config.get('hyper_parameters.wide_input_dim', None)
    deep_input_dim = config.get('hyper_parameters.deep_input_dim', None)

    print("***********************************")
    logger.info(
        "use_gpu: {}, train_data_dir: {}, epochs: {}, print_interval: {}, model_save_path: {}"
        .format(use_gpu, train_data_dir, epochs, print_interval,
                model_save_path))
    print("***********************************")

    place = paddle.set_device('gpu' if use_gpu else 'cpu')

    wide_deep_model = create_model(config)
    model_init_path = config.get("dygraph.model_init_path", None)
    if model_init_path is not None:
        load_model(model_init_path, wide_deep_model)

    # to do : add optimizer function
    optimizer = paddle.optimizer.Adam(parameters=wide_deep_model.parameters())

    file_list = [
        os.path.join(train_data_dir, x) for x in os.listdir(train_data_dir)
    ]
    print("read data")
    dataset = WideDeepDataset(file_list)
    train_dataloader = create_data_loader(dataset, place=place, config=config)

    last_epoch_id = config.get("last_epoch", -1)

    for epoch_id in range(last_epoch_id + 1, epochs):
        # set train mode
        wide_deep_model.train()
        auc_metric = paddle.metric.Auc("ROC")
        acc_metric = paddle.metric.Accuracy()
        epoch_begin = time.time()
        interval_begin = time.time()
        train_reader_cost = 0.0
        train_run_cost = 0.0
        total_samples = 0
        reader_start = time.time()

        for batch_id, batch in enumerate(train_dataloader()):
            train_reader_cost += time.time() - reader_start
            optimizer.clear_grad()
            train_start = time.time()
            batch_size = len(batch[0])

            label, wide_tensor, deep_tensor = create_feeds(
                batch, wide_input_dim, deep_input_dim)

            prediction = wide_deep_model.forward(wide_tensor, deep_tensor)
            loss = create_loss(prediction, label)

            loss.backward()
            optimizer.step()
            train_run_cost += time.time() - train_start
            total_samples += batch_size
            pred = paddle.nn.functional.sigmoid(paddle.clip(prediction,
                                                            min=-15.0,
                                                            max=15.0),
                                                name="prediction")
            label_int = paddle.cast(label, 'int64')

            # for acc
            correct = acc_metric.compute(pred, label_int)
            acc_metric.update(correct)
            # for auc
            predict_2d = paddle.concat(x=[1 - pred, pred], axis=1)
            auc_metric.update(preds=predict_2d.numpy(),
                              labels=label_int.numpy())

            if batch_id % print_interval == 1:
                logger.info(
                    "epoch: {}, batch_id: {}, auc: {:.6f}, acc: {:.5f}, avg_reader_cost: {:.5f} sec, avg_batch_cost: {:.5f} sec, avg_samples: {:.5f}, ips: {:.5f} images/sec"
                    .format(
                        epoch_id, batch_id, auc_metric.accumulate(),
                        acc_metric.accumulate(),
                        train_reader_cost / print_interval,
                        (train_reader_cost + train_run_cost) / print_interval,
                        total_samples / print_interval,
                        total_samples / (train_reader_cost + train_run_cost)))
                train_reader_cost = 0.0
                train_run_cost = 0.0
                total_samples = 0
            reader_start = time.time()

        logger.info(
            "epoch: {} done, auc: {:.6f}, acc: {:.6f}, : epoch time{:.2f} s".
            format(epoch_id, auc_metric.accumulate(), acc_metric.accumulate(),
                   time.time() - epoch_begin))

        save_model(wide_deep_model,
                   optimizer,
                   model_save_path,
                   epoch_id,
                   prefix='rec')
Example #35
argparser.add_argument(
    "--lexicon", dest="lexemes",
    default="../morphological-lexicon/lexemes.yaml",
    help="path to morphological-lexicon lexemes.yaml file "
         "(defaults to ../morphological-lexicon/lexemes.yaml)")

args = argparser.parse_args()

verses = parse_verse_ranges(args.verses)

if args.exclude:
    exclusions = load_wordset(args.exclude)
else:
    exclusions = set()

lexemes = load_yaml(args.lexemes)

if args.glosses:
    glosses = load_yaml(args.glosses)
else:
    glosses = {}


for entry in get_morphgnt(verses):
    if entry[0] == "WORD":
        lemma = entry[1]["lemma"]
        if lemma not in exclusions and lemma not in glosses:
            glosses[lemma] = {"default": lexemes[lemma].get("gloss", "\"@@@\"")}

for lemma, gloss_entries in sorted_items(glosses):
    print("{}:".format(lemma))
Example #36
from flask import Blueprint
from .views import FeedView
import utils


config = utils.load_yaml(utils.FEED_CONFIG)

bp = Blueprint('feed', __name__)
for i, x in enumerate(config['feeds']):
	bp.add_url_rule(f'/feed_{i}', view_func=FeedView.as_view(x['name'], url=x['url']))
Example #37
    help="Whether to use the model with "
    "character level or word level embedding. Specify the option "
    "if you want to use character level embedding")
PARSER.add_argument("--model_config",
                    type=str,
                    default="config/rnn.yml",
                    help="Location of model config")
PARSER.add_argument("--model_dir",
                    type=str,
                    default="models",
                    help="Location to save the model")
ARGS = PARSER.parse_args()

if __name__ == "__main__":
    # Load necessary configs
    model_config = load_yaml(ARGS.model_config)
    device = -1  # Use CPU as a default device

    # Preparing seed
    torch.manual_seed(0)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(0)
        device = None  # Use GPU when available

    if ARGS.char_level:
        tokenize = lambda s: list(s)
    else:
        tokenize = lambda s: s.split()
    # Preparing dataset
    # Get dataset name
    dataset_path = '/'.join(ARGS.dataset.split("/")[:-1])
Example #38
def main():
    args = utils.parse_args(create_parser())
    if args.logging_config is not None:
        logging.config.dictConfig(utils.load_yaml(args.logging_config))
    save_dir = pathlib.Path(args.save_dir)
    if (not args.overwrite and
            save_dir.exists() and utils.has_element(save_dir.glob("*.json"))):
        raise FileExistsError(f"save directory ({save_dir}) is not empty")
    shell = utils.ShellUtils()
    shell.mkdir(save_dir, silent=True)
    logger = logging.getLogger("train")
    utils.seed(args.seed)
    logger.info("loading data...")
    load_fn = utils.chain_func(
        lambda data: list(map(Dialog.from_json, data)),
        utils.load_json
    )
    data_dir = pathlib.Path(args.data_dir)
    train_data = load_fn(str(data_dir.joinpath("train.json")))
    valid_data = load_fn(str(data_dir.joinpath("dev.json")))
    processor = datasets.DialogProcessor(
        sent_processor=datasets.SentProcessor(
            bos=True,
            eos=True,
            lowercase=True,
            tokenizer="space",
            max_len=30
        ),
        boc=True,
        eoc=True,
        state_order="randomized",
        max_len=30
    )
    processor.prepare_vocabs(list(itertools.chain(train_data, valid_data)))
    utils.save_pickle(processor, save_dir.joinpath("processor.pkl"))
    logger.info("preparing model...")
    utils.save_json(utils.load_yaml(args.model_path),
                    save_dir.joinpath("model.json"))
    torchmodels.register_packages(models)
    model_cls = torchmodels.create_model_cls(models, args.model_path)
    model: models.AbstractTDA = model_cls(processor.vocabs)
    model.reset_parameters()
    utils.report_model(logger, model)
    device = torch.device("cpu")
    if args.gpu is not None:
        device = torch.device(f"cuda:{args.gpu}")
    model = model.to(device)

    def create_scheduler(s):
        return utils.PiecewiseScheduler([utils.Coordinate(*t) for t in eval(s)])

    train_args = TrainArguments(
        model=model,
        train_data=tuple(train_data),
        valid_data=tuple(valid_data),
        processor=processor,
        device=device,
        save_dir=save_dir,
        report_every=args.report_every,
        batch_size=args.batch_size,
        valid_batch_size=args.valid_batch_size,
        optimizer=args.optimizer,
        gradient_clip=args.gradient_clip,
        l2norm_weight=args.l2norm_weight,
        learning_rate=args.learning_rate,
        num_epochs=args.epochs,
        kld_schedule=(utils.ConstantScheduler(1.0)
                      if args.kld_schedule is None else
                      create_scheduler(args.kld_schedule)),
        dropout_schedule=(utils.ConstantScheduler(1.0)
                          if args.dropout_schedule is None else
                          create_scheduler(args.dropout_schedule)),
        validate_every=args.validate_every,
        early_stop=args.early_stop,
        early_stop_criterion=args.early_stop_criterion,
        early_stop_patience=args.early_stop_patience,
        disable_kl=args.disable_kl,
        kl_mode=args.kl_mode,
        save_every=args.save_every
    )
    utils.save_json(train_args.to_json(), save_dir.joinpath("args.json"))
    record = train(train_args)
    utils.save_json(record.to_json(), save_dir.joinpath("final-summary.json"))
Example #39
def check_perms(ctx, command_name=None, cog_name=None, suppress=False):
    ret = False

    # inits
    GLOBALS = utils.load_yaml(utils.GLOBAL_PERMS_FILE)
    cmd = ctx.command.name if command_name is None else command_name
    if cog_name is None:
        cog = ctx.cog.qualified_name if ctx.cog is not None else "none"
    else:
        cog = cog_name

    # check global admin
    if _is_global_admin(ctx, GLOBALS):
        ret |= True

    # if dm, check dm perms in global_perms file, else guild perms file
    if ctx.guild is None:
        ret |= _check(cmd=cmd,
                      cog=cog,
                      perm_dict=GLOBALS['dm'],
                      flags=GLOBALS['flags'],
                      ctx=ctx,
                      is_dm=True)
        utils.dump_yaml(GLOBALS, utils.GLOBAL_PERMS_FILE)
    else:
        # check guild owner
        if ctx.author.id == ctx.guild.owner.id:
            ret |= True

        # check guild admin
        member = ctx.guild.get_member(ctx.author.id)
        perms = member.permissions_in(ctx.channel)
        if perms.administrator:
            ret |= True

        # load guild perms
        perms_file = f"{utils.PERMS_DIR}{str(ctx.guild.id)}.yaml"
        if os.path.exists(perms_file):
            perms_dict = utils.load_yaml(perms_file)
        else:
            perms_dict = GLOBALS['default_perms']
            utils.dump_yaml(perms_dict, perms_file)

        # check guild perms
        if not suppress and not ret:
            ret |= _check(cmd=cmd,
                          cog=cog,
                          perm_dict=perms_dict,
                          flags=perms_dict['flags'],
                          ctx=ctx,
                          is_dm=False)
        else:
            try:
                ret |= _check(cmd=cmd,
                              cog=cog,
                              perm_dict=perms_dict,
                              flags=perms_dict['flags'],
                              ctx=ctx,
                              is_dm=False)
            except PermissionError:
                ret |= False

        utils.dump_yaml(perms_dict, perms_file)

    return ret
Example #40
import argparse

from scripts.emod3d_scripts.check_emod3d_subdomains import test_domain
from utils import load_yaml

parser = argparse.ArgumentParser(allow_abbrev=False)
parser.add_argument("nproc")
parser.add_argument("vm_params")
args = parser.parse_args()

params = load_yaml(args.vm_params)
res = test_domain(params["nx"], params["ny"], params["nz"], args.nproc)
for x in res:
    if x.size > 0:
        exit(1)
Example #41
    def from_yaml(cls, filepath):
        config = edict(utils.load_yaml(filepath))
        camera_cfg = config.camera_cfg
        video_cfg = config.video_cfg
        return cls(camera_cfg, video_cfg)
Example #42
    help="path to morphological-lexicon lexemes.yaml file "
    "(defaults to ../morphological-lexicon/lexemes.yaml)")
argparser.add_argument(
    "--sblgnt", dest="sblgnt_dir", default="../sblgnt",
    help="path to MorphGNT sblgnt directory (defaults to ../sblgnt)")

args = argparser.parse_args()

verses = parse_verse_ranges(args.verses)

if args.exclude:
    exclusions = load_wordset(args.exclude)
else:
    exclusions = set()

lexemes = load_yaml(args.lexemes)

if args.headwords:
    headwords = load_yaml(args.headwords)
else:
    headwords = {}


for entry in get_morphgnt(verses, args.sblgnt_dir):
    if entry[0] == "WORD":
        lexeme = entry[8]
        if lexeme not in exclusions and lexeme not in headwords:
            pos = entry[2]
            if pos in ["N-", "A-"]:
                if "full-citation-form" in lexemes[lexeme]:
                    headword = lexemes[lexeme]["full-citation-form"]
Example #43
def __main__():

    #---parser---#
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--configuration', default='configs/simple_spread.yaml', type=str, help='choose configuration file')
    args = parser.parse_args()

    #---config---#
    config = load_yaml(f'./{args.configuration}')

    #---env---#
    env,scenario,world = make_env(config.env)
    ob = env.reset()
    dim_state = [len(k) for k in ob]
    nber_ag = len(env.agents)
    print(f'{nber_ag} agents. Dimension of states: {dim_state}')

    #---agents---#
    agent = DDPGMultiAgent(config)

    #---writer---#
    run_id = f'{config.env}/lrcritic{config.lr_critic}_lractor{config.lr_actor}_gamma{config.gamma}_h_{config.h}_buffer{config.buffer_limit}_std{config.std}\
                _batch{config.mini_batch_size}_rho{config.rho}_ep{config.nber_ep}_act{config.nber_action}_{str(time.time()).replace(".", "_")}'
    writer = SummaryWriter(f'runs/{run_id}')

    for ep in range(config.nber_ep):

        reward = []
        verbose = (ep % config.freq_verbose == 0)
        test = ep % config.freqTest == 0
        if test:
            maxlength = config.maxLengthTest
        else:
            maxlength = config.maxLengthTrain

        for _ in range(maxlength):

            #--act--#
            a = agent.act(ob, test)             # determine action
            # print('a:', a)
            a_copy = copy.deepcopy(a)
            ob_p, r, d, i = env.step(a)     # play action
            # print('action:', a)
            # print('action_copy:', a_copy)
            # print('ob:', ob)
            # print('ob_p:', ob_p)
            # print('r:', r)
            # print('d:', d)
            # print('info:', i)

            #--store--#
            agent.store(ob, a_copy, ob_p, r, d)  # store in buffer the transition
            # print('RB:', agent.memory)
            reward.append(r)                # keep in record reward
            ob = ob_p                       # update state

            #--train--#
            if (len(agent.memory)>config.mini_batch_size) and (agent.actions % config.freq_optim == 0):
                cl, al = agent.train()
                write_it(writer, cl, al, r, agent.iteration)

            #--visualize--#
            if verbose:
                env.render(mode="none")
        mean_reward = np.array(reward).mean(axis=0)
        ob = env.reset()
        write_epoch(writer, mean_reward, ep, test)
        if ep % 10 == 0:
            print(f'Episode: {ep} - Reward: {mean_reward}')
    env.close()
Example #44
    def __init__(self):
        self.settings = load_yaml("SILE.yaml")
Example #45
                target = os.path.join(root, 'stopwords.json')
                with open(target,'r') as f:
                    stopwords = json.load(f)
                file_df = []
                target = os.path.join(item['root'], item['name'].rstrip('xml') + 'json')
                for entry in item['df']:
                    file_df.append(entry)
                    t_array = self.NILC_tokenize(entry['t'])
                    h_array = self.NILC_tokenize(entry['h'])
                    
                    t_array = [ x for x in t_array if len(x) > 1 ]
                    h_array = [ x for x in h_array if len(x) > 1 ]

                    t_array = [ x for x in t_array if x not in stopwords ]
                    h_array = [ x for x in h_array if x not in stopwords ]  

                    sentence_dict[entry['t']] = t_array
                    sentence_dict[entry['h']] = h_array
                with open(target, 'w+') as f:
                    json.dump(file_df, f)
        else:
            target = os.path.join(root, 'dictionary.json')
            with open(target,'w+') as f:
                json.dump(sentence_dict, f)
            
if __name__ == '__main__':
    settings = load_yaml("settings.yaml")
    path = settings['preprocessing']['path']
    xml_files = settings['preprocessing']['xml_files']
    tokenizer = Tokenizer(path, xml_files)
    tokenizer.tokenize()
Example #46
def main(args):
    paddle.seed(12345)
    config = load_yaml(args.config_yaml)
    use_gpu = config.get("dygraph.use_gpu", False)
    test_data_dir = config.get("dygraph.test_data_dir", None)
    epochs = config.get("dygraph.epochs", None)
    print_interval = config.get("dygraph.print_interval", None)
    model_load_path = config.get("dygraph.infer_load_path",
                                 "increment_dygraph")
    start_epoch = config.get("dygraph.infer_start_epoch", -1)
    end_epoch = config.get("dygraph.infer_end_epoch", 1)
    batch_size = config.get('dygraph.batch_size_infer', None)
    margin = config.get('hyper_parameters.margin', 0.1)
    query_len = config.get('hyper_parameters.query_len', 79)
    pos_len = config.get('hyper_parameters.pos_len', 99)
    neg_len = config.get('hyper_parameters.neg_len', 90)

    print("***********************************")
    logger.info(
        "use_gpu: {}, test_data_dir: {}, epochs: {}, print_interval: {}, model_load_path: {}"
        .format(use_gpu, test_data_dir, epochs, print_interval,
                model_load_path))
    print("***********************************")

    place = paddle.set_device('gpu' if use_gpu else 'cpu')

    simnet_model = create_model(config)
    # to do init model
    file_list = [
        os.path.join(test_data_dir, x) for x in os.listdir(test_data_dir)
    ]
    print("read data")
    dataset = BQDataset(file_list)
    test_dataloader = create_data_loader(dataset, place=place, config=config)

    epoch_begin = time.time()
    interval_begin = time.time()

    for epoch_id in range(start_epoch + 1, end_epoch):

        logger.info("load model epoch {}".format(epoch_id))
        model_path = os.path.join(model_load_path, str(epoch_id))
        load_model(model_path, simnet_model)

        for batch_id, batch in enumerate(test_dataloader()):

            inputs = create_feeds(batch, query_len, pos_len)

            cos_pos, cos_neg = simnet_model(inputs, True)

            if batch_id % print_interval == 0:
                logger.info(
                    "infer epoch: {}, batch_id: {}, query_pt_sim: {}, speed: {:.2f} ins/s"
                    .format(
                        epoch_id, batch_id, cos_pos.numpy(), print_interval *
                        batch_size / (time.time() - interval_begin)))
                interval_begin = time.time()

        logger.info(
            "infer epoch: {} done, query_pt_sim: {}, : epoch time{:.2f} s".
            format(epoch_id, cos_pos.numpy(),
                   time.time() - epoch_begin))
Example #47
class MyHandler(socketserver.BaseRequestHandler):
    load_yaml_d = load_yaml('devices.yaml')['devices']

    def task_listen(self):
        port = str(self.server.server_address[1])
        while True:
            try:
                data = self.request.recv(4096)

            except OSError as e:
                logger.error(e)
                break
            if data:
                try:
                    data_list = list(data)
                    logger.debug("Raw data received: {}".format(data_list))
                    newdata = data_verify(data_list)
                    data_map = {}
                    if newdata:
                        values = newdata[7:-2]
                        val_list = addrr_map(port, values, newdata)

                        value_map = {}
                        name_tag_value = []
                        for val in val_list:
                            for key, value in val.items():
                                try:
                                    name_tag_list = self.load_yaml_d[port][key]
                                    name_tag_list.append(value)
                                    newname_tag_list = name_tag_list
                                    name_tag_value.append(newname_tag_list)
                                except Exception:
                                    logger.error("No matching address found for {}".format(key))
                                    continue

                        for i in name_tag_value:
                            value_map[i[1]] = i[2]
                            data_map[i[0]] = [{}]
                            data_map[i[0]][0]['ts'] = getTS()
                            data_map[i[0]][0]['values'] = value_map
                            # print(data_map)
                    client.publish(topic="v1/gateway/telemetry",
                                   payload=json.dumps(data_map),
                                   qos=0)
                    client.loop_start()
                    logger.info("Data pushed successfully! -- {}".format(data_map))
                except UnicodeDecodeError:
                    logger.info("Received data is empty!")
                # sys.stdout.write('\n')

    def task_send(self):
        while True:
            try:
                port = str(self.server.server_address[1])
                if port == '20010':
                    data = self.request.recv(4096)
                    # self.request.send(data)
                else:
                    continue
            except ConnectionResetError:
                continue

    def handle(self):
        print('Client connected', self.client_address, self.server.address_family)
        th1 = threading.Thread(target=self.task_listen, daemon=True)
        th2 = threading.Thread(target=self.task_send, daemon=True)

        th1.start()
        th2.start()
        th2.join()
        print('Program finished')
        self.server.shutdown()