Example #1
def create(devices, setup_params, refs, duts, monitors):
    mons = []
    mhosts = []
    hosts = duts + refs

    # choose only standalone monitors
    for monitor in monitors:
        if monitor not in hosts and monitor != "all":
            mons.append(monitor)

    for mon in mons:
        dev = config.get_device(devices, mon)
        if dev is None:
            continue

        host = Host(host=dev['hostname'],
                    ifname=dev['ifname'],
                    port=dev['port'],
                    name=dev['name'])

        try:
            host.execute(["iw", "reg", "set", setup_params['country']])
            rutils.setup_hw_host(host, setup_params, True)
        except Exception:
            # best-effort radio setup; keep the host even if setup fails
            pass
        mhosts.append(host)

    return mhosts
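
A minimal usage sketch for create(), under assumptions: the devices list and
setup_params dict below are hypothetical stand-ins shaped like the fields the
function reads, and config.get_device()/rutils come from the test framework:

# Hypothetical inputs; in the real framework these come from the test
# configuration and the command line.
devices = [{"name": "mon1", "hostname": "192.168.1.10",
            "ifname": "wlan2", "port": "9877"}]
setup_params = {"country": "US"}

# "mon1" is neither a DUT nor a ref, so it is kept as a standalone monitor.
monitor_hosts = create(devices, setup_params,
                       refs=["ref1"], duts=["dut1"], monitors=["mon1"])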
Example #2
    def __init__(self, model_fun, cfg):
        self.cfg = cfg
        self._prune_weight = cfg.prune.weight
        self._prune_ratio = cfg.prune.ratio
        self._new_cfg = cfg.prune.new_cfg
        self._num_workers = cfg.system.num_workers
        self._device = get_device(cfg.system.gpus)
        state_dict = torch.load(self._prune_weight, map_location=self._device)
        model = model_fun()
        new_model = model_fun()
        model.load_state_dict(state_dict['model'])
        new_model.load_state_dict(state_dict['model'])
        print('load weights from %s' % self._prune_weight)
        self.model = model
        self.new_model = new_model
        self.blocks = []
        self._pruned_weight = self._prune_weight.rsplit('.', 1)[0] + '-pruned.pt'

        # map Darknet cfg section types to their prunable block wrappers
        self.block_map = {
            'convolutional': PB.Conv2d,
            'maxpool': PB.Pool,
            'avgpool': PB.Pool,
            'upsample': PB.Upsample,
            'yolo': PB.YOLO,
            'shortcut': PB.ShortCut,
            'scale_channels': PB.ScaleChannels,
            'route': PB.Route
        }
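
block_map is a dispatch table from Darknet cfg section types to pruning
wrappers. A stand-in sketch of the lookup idea, with dummy classes, since the
real PB.* constructors and their arguments are not shown in the snippet:

# Dummy stand-ins for PB.Conv2d / PB.Pool; the real wrappers take
# layer-specific arguments not visible here.
class Conv2d: pass
class Pool: pass

block_map = {'convolutional': Conv2d, 'maxpool': Pool, 'avgpool': Pool}

section_type = 'convolutional'        # parsed from a Darknet .cfg section
block = block_map[section_type]()     # dict lookup replaces an if/elif chain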
Example #3
def get_host(devices, dev_name):
    dev = config.get_device(devices, dev_name)
    host = Host(host=dev['hostname'],
                ifname=dev['ifname'],
                port=dev['port'],
                name=dev['name'])
    host.dev = dev
    return host
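
A hypothetical call, assuming a device entry with the four fields that
get_host() reads:

devices = [{"name": "dut1", "hostname": "10.0.0.5",
            "ifname": "wlan0", "port": "9877"}]
host = get_host(devices, "dut1")
print(host.dev["ifname"])  # the raw device dict stays attached as host.dev

Note that unlike create() above, get_host() does not guard against
config.get_device() returning None, so an unknown name fails with a TypeError.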
Example #4
    def __init__(self, config):
        # metric
        self.AP = None
        # model
        self._cfg_path = config.model.cfg_path
        # train
        self._train_batch_size = config.train.batch_size
        self._scheduler_type = config.train.scheduler
        self._mile_stones = config.train.mile_stones
        self._gamma = config.train.gamma
        self._init_lr = config.train.learning_rate_init
        self._end_lr = config.train.learning_rate_end
        self._weight_decay = config.train.weight_decay
        self._warmup_epochs = config.train.warmup_epochs
        self._max_epochs = config.train.max_epochs
        # weights
        self._backbone_weight = config.weight.backbone
        self._weights_dir = os.path.join(config.weight.dir,
                                         config.experiment_name)
        self._resume_weight = config.weight.resume
        self._clear_history = config.weight.clear_history
        self._weight_base_name = 'model'
        # eval
        self._eval_after = config.eval.after
        # sparse
        self._sparse_train = config.sparse.switch
        self._sparse_ratio = config.sparse.ratio
        # prune
        self._prune_ratio = config.prune.ratio
        # quant
        self._quant_train = config.quant.switch
        self._quant_backend = config.quant.backend
        self._disable_observer_after = config.quant.disable_observer_after
        self._freeze_bn_after = config.quant.freeze_bn_after
        # system
        self._gpus = fix_gpus(config.system.gpus)
        self._num_workers = config.system.num_workers
        self._device = get_device(self._gpus)

        self.init_epoch = 0
        self.global_step = 0
        self.config = config

        self.dataload_tt = TicToc()
        self.model_tt = TicToc()
        self.epoch_tt = TicToc()

        self.scheduler = {
            'cosine': self.scheduler_cosine,
            'step': self.scheduler_step,
        }[self._scheduler_type]
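
The scheduler dict binds the config string to a method once, at construction
time. A self-contained sketch of the same pattern (Trainer and the method
bodies are placeholders, not the snippet's real schedulers):

class Trainer:
    def scheduler_cosine(self): return "cosine schedule"
    def scheduler_step(self): return "step schedule"

    def __init__(self, scheduler_type):
        # an unknown scheduler string fails fast here with a KeyError,
        # instead of silently falling through an if/elif chain
        self.scheduler = {
            'cosine': self.scheduler_cosine,
            'step': self.scheduler_step,
        }[scheduler_type]

print(Trainer('cosine').scheduler())   # -> cosine schedule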
Example #5
def get_devices(devices, duts, refs, monitors):
    for dut in duts:
        config.get_device(devices, dut, lock=True)
    for ref in refs:
        config.get_device(devices, ref, lock=True)
    for monitor in monitors:
        if monitor == "all":
            continue
        if monitor in duts:
            continue
        if monitor in refs:
            continue
        config.get_device(devices, monitor, lock=True)
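
A hedged call sketch: a name that is both a DUT and a monitor is locked only
once, and the "all" wildcard is skipped because it is not a concrete device.
The names below are hypothetical; devices is the framework's device list.

get_devices(devices,
            duts=["dut1"],
            refs=["ref1"],
            monitors=["dut1", "mon1", "all"])  # locks dut1, ref1, mon1 once each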
Example #6
def create(devices, setup_params, refs, duts, monitors):
    mons = []
    mhosts = []
    hosts = duts + refs

    # choose only standalone monitors
    for monitor in monitors:
        if monitor not in hosts and monitor != "all":
            mons.append(monitor)

    for mon in mons:
        word = mon.split(":")
        dev = config.get_device(devices, word[0])
        if dev is None:
            continue

        host = Host(host=dev['hostname'],
                    ifname=dev['ifname'],
                    port=dev['port'],
                    name=dev['name'])

        for iface_param in word[1:]:
            params = iface_param.split(",")
            if len(params) > 3:
                monitor_param = {
                    "freq": rutils.c2f(params[0]),
                    "bw": params[1],
                    "center_freq1": rutils.c2f(params[2]),
                    "center_freq2": rutils.c2f(params[3])
                }
                host.monitor_params.append(monitor_param)

        try:
            host.execute(["iw", "reg", "set", setup_params['country']])
            rutils.setup_hw_host(host, setup_params, True)
        except Exception:
            # best-effort radio setup; keep the host even if setup fails
            pass
        mhosts.append(host)

    return mhosts
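
In this variant a monitor entry can carry an inline interface spec: a device
name, then colon-separated channel specs of four comma-separated values. A
sketch of how one hypothetical spec string decomposes:

mon = "mon1:36,80,42,0"          # hypothetical spec string
word = mon.split(":")            # ['mon1', '36,80,42,0']
params = word[1].split(",")      # ['36', '80', '42', '0']
# rutils.c2f() appears to map a channel number to its frequency in MHz, so the
# stored monitor_param holds freq/center_freq1/center_freq2 as frequencies and
# the bandwidth ('80') as given.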
Example #7
import torch

import config
from data_preprocess import tensorFromSentence
from result_show import showAttention

######################################################################
# Evaluation
# ==========
#
# Evaluation is mostly the same as training, but there are no targets so
# we simply feed the decoder's predictions back to itself for each step.
# Every time it predicts a word we add it to the output string, and if it
# predicts the EOS token we stop there. We also store the decoder's
# attention outputs for display later.
#

MAX_LENGTH = config.get_max_len()
device = config.get_device()
SOS_TOKEN = config.get_sos_token()
EOS_TOKEN = config.get_eos_token()

def evaluate(encoder, decoder,
             input_lang, output_lang,
             sentence,
             max_length=MAX_LENGTH):
    with torch.no_grad():
        input_tensor = tensorFromSentence(input_lang, sentence)
        input_length = input_tensor.size()[0]
        encoder_hidden = encoder.initHidden()

        encoder_outputs = torch.zeros(max_length, encoder.hidden_size, device=device)

        for ei in range(input_length):
            encoder_output, encoder_hidden = encoder(input_tensor[ei],
                                                     encoder_hidden)
            encoder_outputs[ei] += encoder_output[0, 0]

        # greedy decode: feed the decoder's own predictions back in, stopping
        # at EOS, and keep the attention weights for display (see note above)
        decoder_input = torch.tensor([[SOS_TOKEN]], device=device)
        decoder_hidden = encoder_hidden
        decoded_words = []
        decoder_attentions = torch.zeros(max_length, max_length)

        for di in range(max_length):
            decoder_output, decoder_hidden, decoder_attention = decoder(
                decoder_input, decoder_hidden, encoder_outputs)
            decoder_attentions[di] = decoder_attention.data
            topv, topi = decoder_output.data.topk(1)
            if topi.item() == EOS_TOKEN:
                decoded_words.append('<EOS>')
                break
            decoded_words.append(output_lang.index2word[topi.item()])
            decoder_input = topi.squeeze().detach()

        return decoded_words, decoder_attentions[:di + 1]
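
A quick usage sketch, assuming a trained encoder/attention-decoder pair and
the Lang objects from the training script (all names here are placeholders):

decoded_words, attentions = evaluate(encoder, attn_decoder,
                                     input_lang, output_lang,
                                     "je suis heureux")
print(' '.join(decoded_words))
# showAttention() from result_show can then visualize the attention matrix.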
Example #8

import torch
from config import get_device
device = get_device()


class Evaluator:
    def __init__(self, model):
        self.model = model

    def evaluate(self,
                 eva_X,
                 eva_y,
                 loss_func,
                 keep_loss=True,
                 keep_accuracy=True):
        loop_cnt = 0
        if keep_loss:
            loss_sum = 0
        if keep_accuracy:
            accu_list = []

        while True:
            X, y = eva_X.next_batch(), eva_y.next_batch()
            if X is None:
                assert y is None
                eva_X.rewind()
                eva_y.rewind()
                break
            probs = self.model(*X)
            if keep_loss:
                loss = loss_func(probs, y)
Example #9

from config import ModelParamsConfig as var
from config import get_device
from transformers import PegasusForConditionalGeneration, PegasusTokenizer
import pandas as pd
from tqdm import tqdm
from typing import List

tqdm.pandas()
torch_device = get_device()


def get_response(input_text, num_return_sequences, tokenizer, model) -> List[str]:
    batch = tokenizer.prepare_seq2seq_batch([input_text],
                                            truncation=True,
                                            padding='longest',
                                            return_tensors="pt").to(torch_device)
    translated = model.generate(**batch,
                                num_beams=num_return_sequences,
                                num_return_sequences=num_return_sequences,
                                temperature=1.5).to(torch_device)
    tgt_text = tokenizer.batch_decode(translated, skip_special_tokens=True)
    return tgt_text
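
A hedged call sketch for get_response(); the tokenizer and model would come
from execute_pegasus_augmentation() below:

# Hypothetical: three beam-searched paraphrases of one sentence.
paraphrases = get_response("The weather is nice today.", 3, tokenizer, model)

Note that temperature only takes effect when sampling is enabled; with plain
beam search as configured here, recent transformers versions ignore it.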


def execute_pegasus_augmentation(data, file_path) -> pd.DataFrame:
    MODEL_NAME = var.PARAPHRASING_MODEL
    tokenizer = PegasusTokenizer.from_pretrained(MODEL_NAME)
    model = PegasusForConditionalGeneration.from_pretrained(MODEL_NAME).to(torch_device)
    train = data.copy()