Example 1
    def unregister(self):
        """
        Unregister this system from the insights service
        """
        machine_id = generate_machine_id()
        try:
            logger.debug("Unregistering %s", machine_id)
            url = self.api_url + "/v1/systems/" + machine_id
            net_logger.info("DELETE %s", url)
            self.session.delete(url)
            logger.info(
                "Successfully unregistered from the Red Hat Insights Service")
            write_unregistered_file()
            get_scheduler().remove_scheduling()
            return True
        except requests.ConnectionError as e:
            logger.debug(e)
            logger.error("Could not unregister this system")
            return False
Example 2
    def __init__(self, model, config, batchiter_acoustic, batchiter_train,
                 batchiter_dev):
        self.config = config
        self.batchiter_acoustic = batchiter_acoustic
        self.batchiter_train = batchiter_train
        self.batchiter_dev = batchiter_dev

        self.model = model
        if config["multi_gpu"] == True:
            self.model_to_pack = self.model.module
        else:
            self.model_to_pack = self.model

        self.device = torch.device(
            'cuda:0') if torch.cuda.is_available() else torch.device('cpu')

        self.num_epoch = config["num_epoch"]
        self.exp_dir = config["exp_dir"]
        self.print_inteval = config["print_inteval"]

        self.accumulate_grad_batch = config["accumulate_grad_batch"]
        self.init_lr = config["init_lr"]
        self.grad_max_norm = config["grad_max_norm"]
        self.label_smooth = config["label_smooth"]
        self.lambda_qua = config["lambda_qua"]
        self.lambda_ctc = config["lambda_ctc"]

        self.num_last_ckpt_keep = None
        if "num_last_ckpt_keep" in config:
            self.num_last_ckpt_keep = config["num_last_ckpt_keep"]

        self.lr_scheduler = schedule.get_scheduler(config["lr_scheduler"])
        # Solver state
        self.epoch = 0
        self.step = 0
        self.tr_loss = []
        self.cv_loss = []
        self.lr = self.init_lr

        if config["optimtype"] == "sgd":
            self.optimizer = torch.optim.SGD(self.model_to_pack.parameters(),
                                             lr=self.lr,
                                             momentum=0.9)
        elif config["optimtype"] == "adam":
            self.optimizer = torch.optim.Adam(self.model_to_pack.parameters(),
                                              lr=self.lr,
                                              betas=(0.9, 0.999),
                                              eps=1e-08,
                                              weight_decay=0)
        else:
            raise ValueError("Unknown optimizer.")
        if not os.path.isdir(self.exp_dir):
            os.makedirs(self.exp_dir)
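
For reference, a configuration dict covering every key the constructor above reads might look like the sketch below. The keys are taken directly from the code; the concrete values, and the shape of the "lr_scheduler" entry consumed by the project-local schedule.get_scheduler helper, are assumptions for illustration only.

# Hypothetical config for the solver above; keys mirror the constructor,
# values are placeholder assumptions.
config = {
    "multi_gpu": False,
    "num_epoch": 80,
    "exp_dir": "exp/demo",
    "print_inteval": 100,                # key spelled as in the source
    "accumulate_grad_batch": 1,
    "init_lr": 1e-3,
    "grad_max_norm": 5.0,
    "label_smooth": 0.1,
    "lambda_qua": 0.01,
    "lambda_ctc": 0.5,
    "num_last_ckpt_keep": 5,             # optional
    "lr_scheduler": {"type": "warmup"},  # project-specific shape; assumed
    "optimtype": "adam",                 # "sgd" or "adam"
}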
Example 3
    def __init__(self, model, config, tr_loader, cv_loader):
        self.config = config
        self.tr_loader = tr_loader
        self.cv_loader = cv_loader

        self.model = model
        if config['multi_gpu']:
            self.model_to_pack = self.model.module
        else:
            self.model_to_pack = self.model

        self.device = torch.device(
            'cuda:0') if torch.cuda.is_available() else torch.device('cpu')

        self.num_epoch = config['num_epoch']
        self.exp_dir = config['exp_dir']
        self.print_inteval = config['print_inteval']

        self.accumulate_grad_batch = config['accumulate_grad_batch']
        self.init_lr = config['init_lr']
        self.grad_max_norm = config['grad_max_norm']
        self.label_smooth = config['label_smooth']

        self.num_last_ckpt_keep = None
        if "num_last_ckpt_keep" in config:
            self.num_last_ckpt_keep = config['num_last_ckpt_keep']

        self.lr_scheduler = schedule.get_scheduler(config['lr_scheduler'])
        # Solver state
        self.epoch = 0
        self.step = 0
        self.tr_loss = []
        self.cv_loss = []
        self.lr = self.init_lr

        if config['optimtype'] == "sgd":
            self.optimizer = torch.optim.SGD(self.model_to_pack.parameters(),
                                             lr=self.lr,
                                             momentum=0.9)
        elif config['optimtype'] == "adam":
            self.optimizer = torch.optim.Adam(self.model_to_pack.parameters(),
                                              lr=self.lr,
                                              betas=(0.9, 0.999),
                                              eps=1e-08,
                                              weight_decay=0)
        else:
            raise ValueError("Unknown optimizer.")
        if not os.path.isdir(self.exp_dir):
            os.makedirs(self.exp_dir)
Example 4
    def save_model(self, request, obj, form, change):
        if obj.switch:
            sche = schedule.get_scheduler()
            try:
                sche.remove_job(str(obj.id))
            except JobLookupError:
                pass
            sche.add_job(schedule.crawl_task,
                         'interval',
                         id=str(obj.id),
                         seconds=obj.seconds,
                         max_instances=obj.thread_num,
                         args=[obj],
                         name=obj.task_name,
                         jobstore="redis")
        obj.save()
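
The schedule.get_scheduler() helper used above is project-local, but remove_job and add_job follow APScheduler's standard scheduler API. A self-contained sketch of the same remove-then-re-add pattern against a plain BackgroundScheduler is shown below; the job id, interval, and task function are made up for illustration, and the "redis" jobstore from the original is omitted because it needs separate jobstore configuration.

from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.jobstores.base import JobLookupError


def crawl_task(obj):
    # Placeholder task body; the real crawl_task lives in the project's
    # schedule module.
    print("crawling", obj)


scheduler = BackgroundScheduler()
scheduler.start()

job_id = "42"  # hypothetical primary key of the admin object
try:
    # Drop any previous registration so the interval can be changed safely.
    scheduler.remove_job(job_id)
except JobLookupError:
    pass

scheduler.add_job(crawl_task,
                  'interval',
                  id=job_id,
                  seconds=60,
                  args=[{"name": "demo"}],
                  name="demo-task")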
Example 5
    def __init__(self, model, config, tr_loader, cv_loader):
        self.config = config
        self.tr_loader = tr_loader
        self.cv_loader = cv_loader
        self.model = model
        if config["multi_gpu"] == True:
            self.model_to_pack = self.model.module
        else:
            self.model_to_pack = self.model

        self.num_epoch = config["num_epoch"]
        self.exp_dir = config["exp_dir"]
        self.print_inteval = config["print_inteval"]

        self.accumulate_grad_batch = config["accumulate_grad_batch"]
        self.init_lr = config["init_lr"]
        self.grad_max_norm = config["grad_max_norm"]
        self.label_smooth = config["label_smooth"]

        self.num_last_ckpt_keep = None
        if "num_last_ckpt_keep" in config:
            self.num_last_ckpt_keep = config["num_last_ckpt_keep"]

        self.lr_scheduler = schedule.get_scheduler(config["lr_scheduler"])
        self.metric_summarizer = metric.MetricSummarizer()
        self.metric_summarizer.register_metric("per_token_loss",
                                               display=True,
                                               visual=True,
                                               optim=True)
        self.metric_summarizer.register_metric("avg_token_loss",
                                               display=True,
                                               visual=True,
                                               optim=False)
        self.metric_summarizer.register_metric("per_token_acc",
                                               display=True,
                                               visual=True,
                                               optim=False)
        self.metric_summarizer.register_metric("avg_token_acc",
                                               display=True,
                                               visual=True,
                                               optim=False)
        self.metric_summarizer.register_metric("learning_rate",
                                               display=True,
                                               visual=True,
                                               optim=False)
        self.metric_summarizer.register_metric("token_per_sec",
                                               display=True,
                                               visual=True,
                                               optim=False)

        if utils.TENSORBOARD_LOGGING == 1:
            utils.visualizer.set_writer(os.path.join(self.exp_dir, "log"))

        # trainer state
        self.epoch = 0
        self.step = 0
        self.tr_loss = []
        self.cv_loss = []
        self.lr = self.init_lr

        if config["optimtype"] == "sgd":
            self.optimizer = torch.optim.SGD(self.model_to_pack.parameters(),
                                             lr=self.lr,
                                             momentum=0.9)
        elif config["optimtype"] == "adam":
            self.optimizer = torch.optim.Adam(self.model_to_pack.parameters(),
                                              lr=self.lr,
                                              betas=(0.9, 0.999),
                                              eps=1e-08,
                                              weight_decay=0)
        else:
            raise ValueError("Unknown optimizer.")

        if not os.path.isdir(self.exp_dir):
            os.makedirs(self.exp_dir)

        if utils.TENSORBOARD_LOGGING:
            (ids, labels,
             paddings) = next(iter(self.cv_loader))  # use a longer one
            if next(self.model_to_pack.parameters()).is_cuda:
                ids = ids.cuda()
                labels = labels.cuda()
                paddings = paddings.cuda()
            self.data_for_vis = (ids, labels, paddings)
Example 6
def job_resume(request):
    job_id = request.GET.get("id_")
    sche = schedule.get_scheduler()
    sche.resume_job(job_id)
    return redirect(schedule_view)
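
The resume_job call above is part of APScheduler's scheduler API, which also provides a matching pause_job. A hypothetical pause counterpart to this view (the function name and redirect target are assumptions) could look like:

def job_pause(request):
    # Hypothetical counterpart to job_resume; pause_job is provided by
    # APScheduler alongside resume_job.
    job_id = request.GET.get("id_")
    sche = schedule.get_scheduler()
    sche.pause_job(job_id)
    return redirect(schedule_view)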
Example 7
import time
import websocket
import json

import schedule
import global_variable
import core

if __name__ == '__main__':

    if global_variable.get_slacker().rtm.connect():
        response = global_variable.get_slacker().rtm.start()
        sock_endpoint = response.body['url']
        slack_socket = websocket.create_connection(sock_endpoint)

        scheduler = schedule.get_scheduler()
        scheduler.start()
        schedule.set_scheduler(scheduler)
        schedule.get_scheduler().add_job(func=schedule.process_reserve,
                                         trigger='interval',
                                         seconds=30)

        while True:
            msg = json.loads(slack_socket.recv())

            if len(msg) > 0:
                res = core.parse_msg(msg)
                if res is not None:
                    global_variable.get_slacker().chat.post_message(
                        channel=msg['channel'], text=res, username="******")
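
Examples 4, 6 and 7 above call schedule.get_scheduler() and schedule.set_scheduler() from a project-local schedule module rather than the PyPI package of the same name. A minimal sketch of such a module, assuming it wraps APScheduler's BackgroundScheduler as a module-level singleton, might be:

# schedule.py -- hypothetical module assumed by the examples above; the
# real implementations are not shown on this page.
from apscheduler.schedulers.background import BackgroundScheduler

_scheduler = None


def get_scheduler():
    # Return the shared scheduler, creating it on first use.
    global _scheduler
    if _scheduler is None:
        _scheduler = BackgroundScheduler()
    return _scheduler


def set_scheduler(scheduler):
    # Replace the shared scheduler instance.
    global _scheduler
    _scheduler = scheduler


def process_reserve():
    # Placeholder for the periodic reservation task registered in Example 7.
    pass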