def get_xml_api_num():
    """Count the API entries in url_xml whose names start with 'P' or 'R'."""
    api_sum = 0
    url_xml = Config().get('data', 'path') + Config().get('data', 'url_xml')
    res = BeautifulSoup(open(url_xml), 'xml')
    res_list = res.urls.contents
    for i in res_list:
        # Skip the whitespace text nodes that appear between tags.
        if i != '\n' and i.string[:1] in ['P', 'R']:
            api_sum += 1
    return api_sum
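The function above assumes an XML file whose root urls element lists endpoint names. A minimal, self-contained sketch with hypothetical file contents (the real schema lives in the project's url_xml file):

from bs4 import BeautifulSoup

sample = """
<urls>
    <url>PayOrder</url>
    <url>RefundQuery</url>
    <url>LoginUser</url>
</urls>
"""
soup = BeautifulSoup(sample, 'xml')
# Whitespace text nodes between tags show up in .contents, hence the '\n' check;
# two of the three entries start with 'P' or 'R', so this prints 2.
print(sum(1 for node in soup.urls.contents
          if node != '\n' and node.string and node.string[:1] in ('P', 'R')))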
Example n. 2
    def __init__(self,
                 masked_attention: bool = False,
                 is_base: bool = True) -> None:
        super().__init__()
        self.masked_attention = masked_attention
        self.config = Config()
        self.config.add_model(is_base)

        self.dim_q: int = self.config.model.model_params.dim_q
        self.dim_k: int = self.config.model.model_params.dim_k
        self.dim_v: int = self.config.model.model_params.dim_v
        self.dim_model: int = self.config.model.model_params.dim_model
        if self.masked_attention:
            assert (
                self.dim_k == self.dim_v
            ), "masked self-attention requires key, and value to be of the same size"
        else:
            assert (
                self.dim_q == self.dim_k == self.dim_v
            ), "self-attention requires query, key, and value to be of the same size"

        self.q_project = nn.Linear(self.dim_model, self.dim_q)
        self.k_project = nn.Linear(self.dim_model, self.dim_k)
        self.v_project = nn.Linear(self.dim_model, self.dim_v)
        self.scale = self.dim_k**-0.5
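The forward pass is not part of this snippet; below is a minimal sketch of how these projections and the dim_k ** -0.5 scale are typically combined into scaled dot-product attention (hypothetical, assuming torch is imported):

    def forward(self, query, key, value, mask=None):
        q = self.q_project(query)                       # (batch, seq_len, dim_q)
        k = self.k_project(key)                         # (batch, seq_len, dim_k)
        v = self.v_project(value)                       # (batch, seq_len, dim_v)
        scores = torch.matmul(q, k.transpose(-2, -1)) * self.scale
        if mask is not None:
            # Masked self-attention blanks out disallowed (e.g. future) positions.
            scores = scores.masked_fill(mask == 0, float('-inf'))
        return torch.matmul(torch.softmax(scores, dim=-1), v)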
Example n. 3
def main(config, comet=False):
    config = Config(config)

    # comet-ml setting
    if comet:
        experiment = Experiment(api_key=config.api_key,
                                project_name=config.project_name,
                                workspace=config.workspace)
        experiment.log_parameters(config)

    # device and dataset setting
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    dataset = MCDataset(config.root, config.dataset_name)
    data = dataset[0].to(device)

    config.num_nodes = dataset.num_nodes
    config.num_users = int(data.num_users)
    config.num_relations = dataset.num_relations  # defines number of edge types

    # set and init model
    model = GAE(config, random_init).to(device)
    model.apply(init_xavier)

    # train
    if comet:
        trainer = Trainer(model, dataset, data, calc_rmse, config.epochs,
                          config.lr, config.weight_decay, experiment)
    else:
        trainer = Trainer(model, dataset, data, calc_rmse, config.epochs,
                          config.lr, config.weight_decay)
    trainer.iterate()
Example n. 4
    def __init__(self, pipe_params: Dict[str, Any] = {}, model_name: str = 'baseline') -> None:
        super().__init__(pipe_params)
        self.pipeline.steps.append(('clf', FlatPredictor()))
        self.pipeline.set_params(**pipe_params)

        self.save_path = Config()['Baseline']['model_path']
        self.model_name = model_name
    def __init__(self, langpair: str, is_base: bool = True) -> None:
        super().__init__()
        self.configs = Config()
        self.configs.add_data(langpair)
        self.configs.add_model(is_base)
        self.langpair = langpair
        self.max_length = self.configs.model.model_params.max_len
Example n. 6
    def __init__(self, pipe_params: Dict[str, Any] = {}, model_name: str = 'default_gbm') -> None:
        super().__init__(pipe_params)
        self.pipeline.steps.append(('clf', LGBMClassifier()))
        self.pipeline.set_params(**pipe_params)

        self.save_path = Config()['LGBM']['model_path']
        self.model_name = model_name
    def __init__(self, langpair: str, is_base: bool = True) -> None:
        super().__init__()
        self.embedding = Embeddings(langpair)
        self.config = Config()
        self.config.add_model(is_base)
        self.num_layers = self.config.model.model_params.num_decoder_layer
        self.decoder_layers = get_clones(DecoderLayer(), self.num_layers)
Example n. 8
    def __init__(self, is_base: bool = True, eps: float = 1e-6):
        super().__init__()
        config = Config()
        config.add_model(is_base)
        self.dim_model: int = config.model.model_params.dim_model
        self.gamma = nn.Parameter(torch.ones(self.dim_model))
        self.beta = nn.Parameter(torch.zeros(self.dim_model))
        self.eps = eps
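A minimal forward sketch matching these parameters (hypothetical; the original forward() is not shown): normalize over the last dimension, then apply the learned affine transform.

    def forward(self, x):
        mean = x.mean(dim=-1, keepdim=True)
        std = x.std(dim=-1, keepdim=True)
        return self.gamma * (x - mean) / (std + self.eps) + self.beta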
Example n. 9
    def __init__(self, is_base: bool = True):
        super().__init__()
        config = Config()
        config.add_model(is_base)
        self.dim_model: int = config.model.model_params.dim_model
        self.dim_ff: int = config.model.model_params.dim_ff
        self.linear1 = nn.Linear(self.dim_model, self.dim_ff, bias=True)
        self.ReLU = nn.ReLU()
        self.linear2 = nn.Linear(self.dim_ff, self.dim_model, bias=True)
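The corresponding forward pass would plausibly be the position-wise FFN(x) = linear2(ReLU(linear1(x))) from the Transformer paper; a hypothetical sketch:

    def forward(self, x):
        # Expand to dim_ff, apply the non-linearity, project back to dim_model.
        return self.linear2(self.ReLU(self.linear1(x)))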
Example n. 10
    def _signandpost(self, param, name):
        """Sign the request parameters and post them to the named endpoint."""
        sig = Encrypt().sign(param)
        param['sign'] = sig
        params_json = json.dumps(param)
        log(name, params_json, 'info')

        url = ReadXML(Config().get('data', 'url_xml')).get_url(name)
        return self._header().post(url, params_json).content
    def __init__(self, is_base: bool = True):
        super().__init__()
        self.config = Config()
        self.config.add_model(is_base)

        self.masked_mha = MultiHeadAttention(masked_attention=True)
        self.mha = MultiHeadAttention(masked_attention=False)
        self.ln = LayerNorm(self.config.model.train_hparams.eps)
        self.ffn = FeedForwardNetwork()
        self.residual_dropout = nn.Dropout(
            p=self.config.model.model_params.dropout)
    def __init__(self, langpair: str, is_base: bool = True) -> None:
        super().__init__()
        configs = Config()
        configs.add_tokenizer(langpair)
        configs.add_model(is_base)
        dim_model: int = configs.model.model_params.dim_model
        vocab_size = configs.tokenizer.vocab_size

        self.encoder = Encoder(langpair)
        self.decoder = Decoder(langpair)
        self.linear = nn.Linear(dim_model, vocab_size)
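A hypothetical forward sketch for this encoder-decoder wiring (the actual signatures of Encoder and Decoder are not shown in the snippet):

    def forward(self, source_tokens, target_tokens):
        encoder_output = self.encoder(source_tokens)
        decoder_output = self.decoder(target_tokens, encoder_output)
        return self.linear(decoder_output)  # (batch, seq_len, vocab_size) logits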
Example n. 13
    def __init__(self, masked_attention: bool = False, is_base: bool = True):
        super().__init__()
        self.attention = Attention(masked_attention)
        config = Config()
        config.add_model(is_base)
        self.batch_size = config.model.train_hparams.batch_size
        self.dim_model: int = config.model.model_params.dim_model
        self.dim_v: int = config.model.model_params.dim_v
        self.num_heads = config.model.model_params.num_heads
        assert (self.dim_model // self.num_heads) == self.dim_v
        assert (self.dim_model % self.num_heads == 0), \
            "embed_dim must be divisible by num_heads"
        self.linear = nn.Linear(self.num_heads * self.dim_v, self.dim_model)
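The asserts guarantee dim_model == num_heads * dim_v, so the per-head outputs can be concatenated back to model width. A hypothetical forward sketch, under the assumption that self.attention returns all heads as one tensor:

    def forward(self, query, key, value):
        heads = self.attention(query, key, value)  # assumed (batch, seq_len, num_heads * dim_v)
        return self.linear(heads)                  # back to (batch, seq_len, dim_model)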
Example n. 14
    def setUp(self):
        self.current_path = os.path.abspath(os.path.dirname(__file__))
        fullpath = os.path.join(self.current_path, "test_config")

        data = []
        data.append("[Ignore]")
        data.append("list = user1,user2,user3,user4,user5,user6")
        data.append("list3 = ")
        data.append("list4 = user10")
        data.append("")
        data.append("[Channels]")
        data.append("general = 000000000000000000001")
        data.append("test = 000000000000000000002")
        data.append("awesome = 000000000000000000003")
        data.append("asdf = 000000000000000000004")
        data.append("cool = 000000000000000000005")
        data.append("voice = 000000000000000000006")

        with open(fullpath, 'w') as f:
            f.write("\n".join(data))

        self.channel_config = Config(fullpath, "Channels")
        self.ignore_config = Config(fullpath, "Ignore")
Example n. 15
    def __init__(self, langpair: str, is_base: bool = True) -> None:
        super().__init__()
        # TODO: support transformer-base and transformer-big
        configs = Config()
        configs.add_model(is_base)
        configs.add_tokenizer(langpair)
        tokenizer = load_tokenizer(langpair)
        padding_idx = tokenizer.token_to_id("<pad>")

        self.dim_model: int = configs.model.model_params.dim_model
        self.vocab_size = configs.tokenizer.vocab_size
        self.embedding_matrix = nn.Embedding(self.vocab_size,
                                             self.dim_model,
                                             padding_idx=padding_idx)
        self.scale = self.dim_model**0.5
        self.max_len = configs.model.model_params.max_len
        self.positional_encoding = PositionalEncoding(self.max_len,
                                                      self.dim_model)
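A hypothetical forward sketch: scale the token embeddings by sqrt(dim_model) and add positional information, as in the original Transformer:

    def forward(self, token_ids):
        embedded = self.embedding_matrix(token_ids) * self.scale
        return self.positional_encoding(embedded)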
Example n. 16
    def __init__(self,
                 max_len: int,
                 embedding_dim: int,
                 is_base: bool = True) -> None:
        super().__init__()
        config = Config()
        config.add_model(is_base)

        self.dropout = nn.Dropout(p=config.model.model_params.dropout)
        positional_encoding = torch.zeros(max_len, embedding_dim)
        position = torch.arange(0, max_len,
                                dtype=torch.float).unsqueeze(1)  # (max_len, 1)
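        # div_term = 1e4 ** (arange(0, dim, 2) / dim), so position / div_term
        # reproduces the sinusoid arguments pos / 10000^(2i / dim) from
        # "Attention Is All You Need".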
        div_term = torch.exp(
            torch.arange(0, embedding_dim, 2).float() / embedding_dim *
            math.log(1e4))
        positional_encoding[:, 0::2] = torch.sin(position / div_term)
        positional_encoding[:, 1::2] = torch.cos(position / div_term)
        positional_encoding = positional_encoding.unsqueeze(0).transpose(
            0, 1)  # (max_len, 1, embedding_dim)
        self.register_buffer("positional_encoding",
                             positional_encoding)  # TODO: register_buffer?
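A hypothetical forward sketch: slice the precomputed table to the input length, add it to the embeddings, and apply dropout.

    def forward(self, x):
        # x is assumed to be (seq_len, batch, embedding_dim) to match the
        # (max_len, 1, embedding_dim) buffer registered above.
        x = x + self.positional_encoding[:x.size(0)]
        return self.dropout(x)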
Example n. 17
import nltk
import torch
from torchvision import transforms

from src.utils import Config, get_training_data, set_timezone, tag_date_time, sentence_similarity
from src.utils import pick_random_test_image, copy_file_to_correct_folder, predict_image_caption, find_bleu_score, process_predicted_tokens

set_timezone()

if __name__ == "__main__":

    transform_test = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    ])

    config = Config("config.yaml")

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    test_data_loader = get_data_loader(
        transform=transform_test,
        caption_file=config.CAPTION_FILE,
        image_id_file=config.IMAGE_ID_FILE_TEST,
        image_folder=config.IMAGE_DATA_DIR,
        config=config,
        vocab_file=config.VOCAB_FILE,
        mode="test",
    )

    # TODO #2: Specify the saved models to load.
    print(f"DEV MODE: {config.DEV_MODE}")
Example n. 18
import random
from locust import HttpUser, TaskSet, task, between
from src.utils import Config

TWEETS_PATH = Config()['DATA']['tests_text']
with open(TWEETS_PATH) as f:
    raw_tweets = f.readlines()


class TagTweet(TaskSet):
    @task
    def predict(self):
        tweet = random.choice(raw_tweets)
        request_body = {"text": tweet}
        self.client.post('/tag_tweet', json=request_body)


class TagTweetLoadTest(HttpUser):
    tasks = [TagTweet]
    host = 'http://0.0.0.0:8100'
    stop_timeout = 20
    wait_time = between(1, 5)
Example n. 19
    def build(cls, path, min_freq=2, fix_len=20, data_path='', **kwargs):
        """
        Build a brand-new Parser, including initialization of all data fields and model parameters.

        Args:
            path (str):
                The path of the model to be saved.
            min_freq (int):
                The minimum frequency needed to include a token in the vocabulary. Default: 2.
            fix_len (int):
                The max length of all subword pieces. The excess part of each piece will be truncated.
                Required if using CharLSTM/BERT.
                Default: 20.
            kwargs (dict):
                A dict holding the unconsumed arguments.

        Returns:
            The created parser.
        """

        train = os.path.join(data_path, "train_approach1")
        args = Config(**locals())
        args.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        os.makedirs(os.path.dirname(path), exist_ok=True)
        if os.path.exists(path) and not args.build:
            parser = cls.load(**args)
            parser.model = cls.MODEL(**parser.args)
            parser.model.load_pretrained(parser.WORD.embed).to(args.device)
            return parser

        logger.info("Build the fields")
        WORD = Field('words', pad=pad, unk=unk, bos=bos, eos=eos, lower=True)
        if args.feat == 'char':
            FEAT = SubwordField('chars', pad=pad, unk=unk, bos=bos, eos=eos, fix_len=args.fix_len)
        elif args.feat == 'bert':
            from transformers import AutoTokenizer
            tokenizer = AutoTokenizer.from_pretrained(args.bert)
            FEAT = SubwordField('bert',
                                pad=tokenizer.pad_token,
                                unk=tokenizer.unk_token,
                                bos=tokenizer.cls_token or tokenizer.bos_token,
                                eos=tokenizer.sep_token or tokenizer.eos_token,
                                fix_len=args.fix_len,
                                tokenize=tokenizer.tokenize)
            FEAT.vocab = tokenizer.get_vocab()
        GOLD_METRIC = RawField('golden_metric')
        ORIGINAL_EDU_BREAK = RawField('original_edu_break')
        SENT_BREAK = UnitBreakField('sent_break')
        EDU_BREAK = UnitBreakField('edu_break')
        # CHART = ChartDiscourseField('charts_discourse', pad=pad)
        PARSING_LABEL_TOKEN = ChartDiscourseField('charts_discourse_token')
        PARSING_LABEL_EDU = ChartDiscourseField('charts_discourse_edu')
        PARSING_ORDER_EDU = ParsingOrderField('parsing_order_edu')
        PARSING_ORDER_TOKEN = ParsingOrderField('parsing_order_token')
        PARSING_ORDER_SELF_POINTING_TOKEN = ParsingOrderField('parsing_order_self_pointing_token')

        if args.feat in ('char', 'bert'):
            transform = DiscourseTreeDocEduGold(WORD=(WORD, FEAT), ORIGINAL_EDU_BREAK=ORIGINAL_EDU_BREAK,
                                                GOLD_METRIC=GOLD_METRIC, SENT_BREAK=SENT_BREAK,
                                                EDU_BREAK=EDU_BREAK,
                                                PARSING_LABEL_TOKEN=PARSING_LABEL_TOKEN,
                                                PARSING_LABEL_EDU=PARSING_LABEL_EDU,
                                                PARSING_ORDER_EDU=PARSING_ORDER_EDU,
                                                PARSING_ORDER_TOKEN=PARSING_ORDER_TOKEN,
                                                PARSING_ORDER_SELF_POINTING_TOKEN=PARSING_ORDER_SELF_POINTING_TOKEN
                                                )
        # else:
        #     transform = DiscourseTree(WORD=WORD, EDU_BREAK=EDU_BREAK, GOLD_METRIC=GOLD_METRIC, CHART=CHART, PARSINGORDER=PARSINGORDER)

        train = Dataset(transform, args.train)
        WORD.build(train, args.min_freq, (Embedding.load(args.embed, args.unk) if args.embed else None))
        # WORD.build(train, args.min_freq)
        FEAT.build(train)
        PARSING_LABEL_TOKEN.build(train)
        PARSING_LABEL_EDU.build(train)
        args.update({
            'n_words': WORD.vocab.n_init,
            'n_feats': len(FEAT.vocab),
            'n_labels': len(PARSING_LABEL_TOKEN.vocab),
            'pad_index': WORD.pad_index,
            'unk_index': WORD.unk_index,
            'bos_index': WORD.bos_index,
            'eos_index': WORD.eos_index,
            'feat_pad_index': FEAT.pad_index
        })
        model = cls.MODEL(**args)
        model.load_pretrained(WORD.embed).to(args.device)
        return cls(args, model, transform)
Example n. 20
    def __init__(self,
                 n_words,
                 n_feats,
                 n_labels,
                 feat='char',
                 n_embed=100,
                 n_feat_embed=100,
                 n_char_embed=50,
                 bert=None,
                 n_bert_layers=4,
                 mix_dropout=.0,
                 embed_dropout=.33,
                 n_lstm_hidden=400,
                 n_lstm_layers=3,
                 lstm_dropout=.33,
                 n_mlp_span=500,
                 n_mlp_label=100,
                 mlp_dropout=.33,
                 feat_pad_index=0,
                 pad_index=0,
                 unk_index=1,
                 **kwargs):
        super().__init__()

        self.args = Config().update(locals())
        # the embedding layer
        self.word_embed = nn.Embedding(num_embeddings=n_words,
                                       embedding_dim=n_embed)
        if feat == 'char':
            self.feat_embed = CharLSTM(n_chars=n_feats,
                                       n_embed=n_char_embed,
                                       n_out=n_feat_embed,
                                       pad_index=feat_pad_index)
        elif feat == 'bert':
            self.feat_embed = BertEmbedding(model=bert,
                                            n_layers=n_bert_layers,
                                            n_out=n_feat_embed,
                                            pad_index=feat_pad_index,
                                            dropout=mix_dropout)
            self.n_feat_embed = self.feat_embed.n_out
        elif feat == 'tag':
            self.feat_embed = nn.Embedding(num_embeddings=n_feats,
                                           embedding_dim=n_feat_embed)
        else:
            raise RuntimeError("The feat type should be in ['char', 'bert', 'tag'].")
        self.embed_dropout = IndependentDropout(p=embed_dropout)

        # the lstm layer
        self.token_lstm = BiLSTM(input_size=n_embed + n_feat_embed,
                                 hidden_size=n_lstm_hidden,
                                 num_layers=n_lstm_layers,
                                 dropout=lstm_dropout)
        self.token_lstm_dropout = SharedDropout(p=lstm_dropout)

        self.edu_lstm = BiLSTM(input_size=n_lstm_hidden * 2,
                               hidden_size=n_lstm_hidden,
                               num_layers=n_lstm_layers,
                               dropout=lstm_dropout)
        self.edu_lstm_dropout = SharedDropout(p=lstm_dropout)
        self.decoder_layers = n_lstm_layers

        self.mlp_span_splitting = MLP(n_in=n_lstm_hidden * 2,
                                      n_out=n_mlp_span,
                                      dropout=mlp_dropout)

        self.pad_index = pad_index
        self.unk_index = unk_index
        self.hx_dense = nn.Linear(2 * n_lstm_hidden, 2 * n_lstm_hidden)
Example n. 21
    def __init__(self,
                 n_words,
                 n_feats,
                 n_labels,
                 feat='char',
                 n_embed=100,
                 n_feat_embed=100,
                 n_char_embed=50,
                 bert=None,
                 n_bert_layers=4,
                 mix_dropout=.0,
                 embed_dropout=.33,
                 n_lstm_hidden=400,
                 n_lstm_layers=3,
                 lstm_dropout=.33,
                 n_mlp_span=500,
                 n_mlp_label=100,
                 mlp_dropout=.33,
                 feat_pad_index=0,
                 pad_index=0,
                 unk_index=1,
                 **kwargs):
        super().__init__()

        self.args = Config().update(locals())
        # the embedding layer
        self.word_embed = nn.Embedding(num_embeddings=n_words,
                                       embedding_dim=n_embed)
        if feat == 'char':
            self.feat_embed = CharLSTM(n_chars=n_feats,
                                       n_embed=n_char_embed,
                                       n_out=n_feat_embed,
                                       pad_index=feat_pad_index)
        elif feat == 'bert':
            if kwargs['bert_requires_grad'] == 'False':
                bert_requires_grad = False
            elif kwargs['bert_requires_grad'] == 'True':
                bert_requires_grad = True
            else:
                raise ValueError("bert_requires_grad must be 'True' or 'False'.")
            if bert_requires_grad:
                self.feat_embed = BertEmbeddingfinetuning(model=bert,
                                                          n_layers=n_bert_layers,
                                                          n_out=n_feat_embed,
                                                          pad_index=feat_pad_index,
                                                          dropout=mix_dropout)
            else:
                self.feat_embed = BertEmbedding(model=bert,
                                                n_layers=n_bert_layers,
                                                n_out=n_feat_embed,
                                                pad_index=feat_pad_index,
                                                dropout=mix_dropout)
            self.n_feat_embed = self.feat_embed.n_out
        elif feat == 'tag':
            self.feat_embed = nn.Embedding(num_embeddings=n_feats,
                                           embedding_dim=n_feat_embed)
        else:
            raise RuntimeError("The feat type should be in ['char', 'bert', 'tag'].")
        self.embed_dropout = IndependentDropout(p=embed_dropout)

        # the lstm layer
        self.lstm = BiLSTM(input_size=n_embed + n_feat_embed,
                           hidden_size=n_lstm_hidden,
                           num_layers=n_lstm_layers,
                           dropout=lstm_dropout)
        self.lstm_dropout = SharedDropout(p=lstm_dropout)
        self.decoder_layers = n_lstm_layers
        # the MLP layers
        # self.mlp_span_l = MLP(n_in=n_lstm_hidden*2,
        #                       n_out=n_mlp_span,
        #                       dropout=mlp_dropout)
        # self.mlp_span_r = MLP(n_in=n_lstm_hidden*2,
        #                       n_out=n_mlp_span,
        #                       dropout=mlp_dropout)
        self.mlp_span_splitting = MLP(n_in=n_lstm_hidden * 2,
                                      n_out=n_mlp_span,
                                      dropout=mlp_dropout)
        self.mlp_label_l = MLP(n_in=n_lstm_hidden * 2,
                               n_out=n_mlp_label,
                               dropout=mlp_dropout)
        self.mlp_label_r = MLP(n_in=n_lstm_hidden * 2,
                               n_out=n_mlp_label,
                               dropout=mlp_dropout)

        # the Biaffine layers
        # self.span_attn = Biaffine(n_in=n_mlp_span,
        #                           bias_x=True,
        #                           bias_y=False)
        self.label_attn = Biaffine(n_in=n_mlp_label,
                                   n_out=n_labels,
                                   bias_x=True,
                                   bias_y=True)
        # self.crf = CRFConstituency()
        # self.criterion = nn.CrossEntropyLoss()
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.hx_dense = nn.Linear(2 * n_lstm_hidden, 2 * n_lstm_hidden)
Example n. 22
# from tools.read_xls import ReadXls

from xlrd import open_workbook

from src.utils import Config

wbpath = Config().get('data', 'path')
XLSX = ['zhigou.xlsx', 'merchantcenter.xlsx', 'personalcenter.xlsx', 'shopping.xlsx']
count = 0
for i in XLSX:
    print('Case file is: " {0} ",'.format(i), end=' ')
    book = wbpath + i
    wb = open_workbook(book)
    sheets = wb.nsheets
    print('there are {0} Sheets in this file. Details:'.format(sheets))
    for j in range(sheets):
        sheet = wb.sheet_by_index(j)
        print('    Case Num in Sheet {0} is {1}'.format(sheet.name, sheet.nrows - 2))
        count += (sheet.nrows - 2)
    print()

print('Total Num of Cases: {0}'.format(count))
Example n. 23
from comet_ml import Experiment

import torch
from torchvision import transforms

from src.dataset import Places2
from src.model import PConvUNet
from src.loss import InpaintingLoss, VGG16FeatureExtractor
from src.train import Trainer
from src.utils import Config, load_ckpt, create_ckpt_dir


# set the config
config = Config("config.yml")
config.ckpt = create_ckpt_dir()
print("Check Point is '{}'".format(config.ckpt))

# Define the used device
device = torch.device("cuda:{}".format(config.cuda_id)
                      if torch.cuda.is_available() else "cpu")

# Define the model
print("Loading the Model...")
model = PConvUNet(finetune=config.finetune,
                  layer_size=config.layer_size)
if config.finetune:
    model.load_state_dict(torch.load(config.finetune)['model'])
model.to(device)


# Data Transformation
Example n. 24
"""Module and script to process load data."""
from datetime import timedelta, datetime

import click
import pandas as pd
import numpy as np
import pycountry

from src.conversion import watt_to_watthours
from src.utils import Config


@click.command()
@click.argument('path_to_raw_load')
@click.argument('path_to_output_data')
@click.argument('config', type=Config())
def process_data(path_to_raw_load, path_to_output_data, config):
    """Extracts national energy demand 2017 from raw data."""
    data = read_load_profiles(
        path_to_raw_load=path_to_raw_load,
        start=datetime(2017, 1, 1),
        end=datetime(2018, 1, 1),
        country_codes_iso2=[pycountry.countries.lookup(country).alpha_2
                            for country in config["scope"]["countries"]]
    )
    watt_to_watthours(data.mean(), timedelta(days=365)).div(1000).div(1000).to_csv(
        path_to_output_data,
        header=["twh_per_year"],
        index_label="country_code"
    )
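The imported watt_to_watthours helper is not shown here; a plausible sketch (hypothetical, the real implementation lives in src.conversion) multiplies average power by the number of hours in the period, and the two .div(1000) calls above then shift the result down two SI prefixes to match the twh_per_year header:

from datetime import timedelta

def watt_to_watthours(watt, duration):
    # Energy = average power x elapsed time, with the timedelta expressed in hours.
    return watt * duration.total_seconds() / 3600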
Example n. 25
    def load(cls, path, **kwargs):
        r"""
        Load data fields and model parameters from a pretrained parser.

        Args:
            path (str):
                - a string with the shortcut name of a pre-trained parser defined in supar.PRETRAINED
                  to load from cache or download, e.g., `crf-dep-en`.
                - a path to a directory containing a pre-trained parser, e.g., `./<path>/model`.
            kwargs (dict):
                A dict holding the unconsumed arguments.

        Returns:
            The loaded parser.
        """

        args = Config(**locals())
        args.device = 'cuda' if torch.cuda.is_available() else 'cpu'

        if os.path.exists(path):
            state = torch.load(path, map_location=args.device)

        args = state['args'].update(args)
        args.device = 'cpu'

        model = cls.MODEL(**args)

        # print(cls.WORD.embed)
        # model.load_pretrained(cls.WORD.embed).to(args.device)
        # parser = cls.load(**args)
        # parser.model = cls.MODEL(**parser.args)
        # parser.model.load_pretrained(parser.WORD.embed).to(args.device)
        # print(parser.WORD.embed)

        # parser.model.to(args.device)

        # if os.path.exists(path):  # and not args.build:
        #     parser = cls.load(**args)
        #     parser.model = cls.MODEL(**parser.args)
        #     parser.model.load_pretrained(parser.WORD.embed).to(args.device)
        #     return parser

        # parser = cls.load(**args)

        # print(parser.CHART)
        # print(vars(parser.CHART.vocab))

        transform = state['transform']

        parser = cls(args, model, transform)
        if state['pretrained']:
            model.load_pretrained(state['pretrained']).to(args.device)
        else:
            model.load_pretrained(parser.WORD.embed).to(args.device)

        # print(state['state_dict'])

        model.load_state_dict(state['state_dict'])
        model.eval()
        model.to(args.device)

        parser.model = model
        parser.args = args
        parser.transform = transform

        if parser.args.feat in ('char', 'bert'):
            parser.WORD, parser.FEAT = parser.transform.WORD
        else:
            parser.WORD, parser.FEAT = parser.transform.WORD, parser.transform.POS
        parser.EDU_BREAK = parser.transform.EDU_BREAK
        parser.GOLD_METRIC = parser.transform.GOLD_METRIC
        # self.TREE = self.transform.TREE
        try:
            parser.CHART = parser.transform.CHART
            parser.PARSINGORDER = parser.transform.PARSINGORDER
        except AttributeError:
            print('parser.CHART and parser.PARSINGORDER are not available for this model.')

        return parser
Example n. 26
# -*- coding: utf-8 -*-
import json
import unittest

from test.API_test.common.BaseCaseOperate import BaseCaseOperate
from test.API_test.common.Merchant import Merchant

from src.utils import Config
from src.utils import ReadXML

url_xml = Config().get('data', 'url_xml')


def generator(datafile='shopping.xlsx',
              sheet_name='sheet0',
              userid=None,
              prepare=False):
    u"""生成测试用例方法,传入data文件与sheet名,得到class"""
    class TestStuff(unittest.TestCase):
        #  基本类,同一类型的接口都可以用这个类来组织测试用例
        def setUp(self):
            u"""在setup中新生成一个user与merchant"""
            self.url = ReadXML(url_xml).get_url(sheet_name)
            print u'接口地址:{0}'.format(self.url)
            if userid is None:
                self.merchant = Merchant()
                self.merchant.add()
                self.userid = self.merchant.getuserid()
            else:
                self.userid = userid
Example n. 27
from comet_ml import Experiment

import torch
from torchvision import transforms

from src.dataset import Places2
from src.model import PConvUNet
from src.loss import InpaintingLoss, VGG16FeatureExtractor
from src.train import Trainer
from src.utils import Config, load_ckpt, create_ckpt_dir


# set the config
config = Config("default_config.yml")
config.ckpt = create_ckpt_dir()
print("Check Point is '{}'".format(config.ckpt))

# Define the used device
device = torch.device("cuda:{}".format(config.cuda_id)
                      if torch.cuda.is_available() else "cpu")

# Define the model
print("Loading the Model...")
model = PConvUNet(finetune=config.finetune,
                  layer_size=config.layer_size)
if config.finetune:
    model.load_state_dict(torch.load(config.finetune)['model'])
model.to(device)


# Data Transformation
Example n. 28
    train_dataloader, val_dataloader = get_dataloader(cfg)

    # Create model
    Model = getattr(__import__("src"), cfg.model.name)
    runner = Model(cfg.model.params)

    # Set trainer (PyTorch Lightning)
    os.makedirs(cfg.model.ckpt.path, exist_ok=True)
    trainer = pl.Trainer(
        logger=wandb_logger,
        gpus=-1 if torch.cuda.is_available() else 0,
        max_epochs=cfg.model.params.max_epochs,
        deterministic=True,
        checkpoint_callback=ModelCheckpoint(cfg.model.ckpt.path),
    )

    # Train
    trainer.fit(
        runner, train_dataloader=train_dataloader, val_dataloaders=val_dataloader
    )


if __name__ == "__main__":
    args = parse_args()

    cfg = Config()
    cfg.add_dataset(args.cfg_dataset)
    cfg.add_model(args.cfg_model)

    run(cfg, args.wandb)
Example n. 29

@click.command()
@click.argument("path_to_units")
@click.argument("path_to_eez")
@click.argument("path_to_shared_coast")
@click.argument("path_to_capacities_pv_prio")
@click.argument("path_to_capacities_wind_prio")
@click.argument("path_to_electricity_yield_pv_prio")
@click.argument("path_to_electricity_yield_wind_prio")
@click.argument("path_to_eligibility_categories")
@click.argument("path_to_land_cover")
@click.argument("path_to_protected_areas")
@click.argument("path_to_result")
@click.argument("scenario")
@click.argument("config", type=Config())
def potentials(path_to_units, path_to_eez, path_to_shared_coast,
               path_to_capacities_pv_prio, path_to_capacities_wind_prio,
               path_to_electricity_yield_pv_prio,
               path_to_electricity_yield_wind_prio,
               path_to_eligibility_categories, path_to_land_cover,
               path_to_protected_areas, path_to_result, scenario, config):
    """Determine potential of renewable electricity in each administrative unit.

    * Take the (only technically restricted) raster data potentials,
    * add restrictions based on scenario definitions,
    * allocate the onshore potentials to the administrative units,
    * allocate the offshore potentials to exclusive economic zones (EEZ),
    * allocate the offshore potential of EEZ to units based on the fraction of shared coast.
    """
    with rasterio.open(path_to_eligibility_categories, "r") as src:
Example n. 30
    def test_create_file(self):
        new_file = os.path.join(self.current_path, "new_file")
        self.assertFalse(os.path.isfile(new_file))
        Config(new_file, "MySection")
        self.assertTrue(os.path.isfile(new_file))
        os.remove(new_file)