Example #1
def main(mode):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("Device: {}".format(device))
    config = load_config()
    model_path = Path("3_models") / config.general["model_path"]
    Trainer = import_class(config.general["trainer"])
    Generator = import_class(config.general["generator"])
    Discriminator = import_class(config.general["discriminator"])

    G = Generator(config.model)
    D = Discriminator(config.model)
    if mode == "train":
        trainer = Trainer(config.training, G, D, device)
        trainer.train()
        save_model(G, D, model_path)
    elif mode == "eval":
        load_model(G, D, model_path)
        eval_model(config.training, G, device)
    elif mode == "show_sample":  # show samples generated by G in the given epoch
        print("Show samples in epoch {}".format(opt.epoch))
        show_sample(config.training, epoch=opt.epoch)
    elif mode == "draw_loss":
        plot_loss(config.training)
    elif mode == "save_model":  # save model from checkpoint
        checkpoint_path = Path("2_experiments") / config.training["exp_dir"] / "ckp_{:0>4d}.pth".format(opt.epoch)
        print("Save model from {}".format(checkpoint_path))
        save_model_from_checkpoint(G, D, checkpoint_path, model_path)
    elif mode == "pack":  # pack all the sample files to a zip file
        pack_sample_files(Path("2_experiments") / config.training["exp_dir"])
    else:
        print("Unknown Mode")
Example #2
def shell_factory(loader, global_conf, **local_conf):
    app_factory = local_conf.pop('paste.app_factory')
    shell_class = local_conf.pop('shell_class')
    shells = local_conf.pop('shell').split()
    root_path = os.path.dirname(global_conf[FILE_PATH])
    shell_class_kw = _get_shell_kwargs(local_conf)
    sh = import_class(shell_class)(**shell_class_kw)
    sh.load_root(root_path)
    conf = as_config(global_conf[FILE_PATH])
    local_conf[URL_PATH] = global_conf.pop(URL_PATH, '/')
    for shell in shells:
        sh_conf = dict()
        for k, v in conf.items('shell:{0}'.format(shell)):
            sh_conf[k] = v
        models = sh_conf.pop('models', None)
        models = models.split() if models else []
        for model in models:
            mod_conf = dict()
            mod_conf.update(sh_conf)
            for k, v in conf.items('model:{0}'.format(model)):
                mod_conf[k] = v
            model_kwargs = _get_model_kwargs(mod_conf)
            model = loader.get_app(model, global_conf=global_conf)
            mod = import_class(model, root_path)
            mod = partial(mod, **model_kwargs)
            sh.load_model(mod,
                          local_conf=mod_conf,
                          global_conf=global_conf,
                          relative_to=global_conf[FILE_PATH])
    local_conf['shell'] = sh

    app = _load_factory(app_factory, global_conf, **local_conf)
    return app
Example #3
def get_package_parser_class(software_platform):
    if software_platform in [PlatformFamily.ASR9K, PlatformFamily.CRS]:
        return import_class('parsers.platforms.IOSXR.CLIPackageParser')
    elif software_platform in [PlatformFamily.NCS6K, PlatformFamily.ASR9K_X64]:
        return import_class('parsers.platforms.eXR.CLIPackageParser')
    else:
        raise UnknownSoftwarePlatform('%s' % software_platform)
Example #4
def scheduler_process(queue, ppid):
    """ Процесс для работы планировщика """

    ram_class = import_class(settings.SCHEDULER_RAM_STORAGE)
    persist_class = import_class(settings.SCHEDULER_PERSISTENT_STORAGE)
    scheduler = Scheduler(
        ram_storage=ram_class(), 
        persist_storage=persist_class(
            settings.SCHEDULER_DB_PERSISTENT_STORAGE,
            pool_recycle=settings.POOL_RECYCLE,
            pool_size=settings.POOL_SIZE
        )
    )

    # Thread for immediate running of incoming commands
    Thread(target=inc_message_thread, args=(queue, scheduler, )).start()
    
    logging.info("Restoring previous events...")
    scheduler.restore()
    logging.info("Restoring completed.")

    period = 15 if settings.DEBUG is False else 5
    start_time = time.time()
    counter = 0

    while True:
        # Exit if the parent process has died
        if os.getppid() != ppid:
            sys.exit()

        # Fetch events that are due for processing
        events = scheduler.get_events()

        for event in events:
            scheduler.mark_as_processing(event.event_id)

            logging.debug("Event (id=%s) marked as processing", event.event_id)
            logging.debug("Sending event (id=%s) to posting queue...", event.event_id)

            message = dict(event_id=event.event_id)
            message.update(event.data)
            send_to_queue(settings.RABBITMQ_HOST, settings.POSTING_QUEUE, message)

        allowance = time.time() - counter*period - start_time
        logging.debug(" [x] allowance = %s, counter = %s, per cent of period = %s", allowance, counter, int(allowance/period * 100))

        if allowance > period/3.0:
            if period - allowance >= 0.0:
                time.sleep(period - allowance)
            else:
                time.sleep(0.1)

            counter = -1
            start_time = time.time()
        else:
            time.sleep(period)

        counter += 1
Example #5
 def apply(self, system, collection=None):
   for operation in self.operations:
     operation.apply(system)
   system.last_transaction = self
   if collection is None:
     system.save()
   else:
     System = import_class("System")
     Node = import_class("Node")
     with prefix_collections(collection, System, Node):
       system.save()
Example #6
    def load_data(self):
        Feeder = import_class(self.arg.feeder)
        self.data_loader = dict()

        def worker_seed_fn(worker_id):
            # give workers different seeds
            return init_seed(self.arg.seed + worker_id + 1)

        rank = int(os.environ['RANK'])
        world_size = torch.cuda.device_count()
        if self.arg.phase == 'train':
            dataset_train = Feeder(**self.arg.train_feeder_args)
            sampler_train = DistributedSampler(dataset_train,
                                               world_size,
                                               rank,
                                               shuffle=True)
            self.data_loader['train'] = torch.utils.data.DataLoader(
                dataset=dataset_train,
                batch_size=self.arg.batch_size // world_size,
                sampler=sampler_train,
                shuffle=False,
                num_workers=self.arg.num_worker // world_size,
                drop_last=True,
                worker_init_fn=worker_seed_fn)

        dataset_test = Feeder(**self.arg.test_feeder_args)
        # sampler_test = DistributedSampler(dataset_test, world_size, rank, shuffle=False)
        self.data_loader['test'] = torch.utils.data.DataLoader(
            dataset=dataset_test,
            batch_size=self.arg.test_batch_size // world_size,
            # sampler=sampler_test,
            shuffle=False,
            num_workers=self.arg.num_worker // world_size,
            drop_last=False,
            worker_init_fn=worker_seed_fn)
Example #7
 def load_data(self):
     print("Loading data")
     Feeder = import_class(self.arg.dataloader)
     self.data_loader = dict()
     if self.arg.train_loader_args != {}:
         self.data_loader['train'] = torch.utils.data.DataLoader(
             dataset=Feeder(**self.arg.train_loader_args),
             batch_size=self.arg.batch_size,
             shuffle=True,
             drop_last=True,
             num_workers=self.arg.num_worker,
         )
     if self.arg.valid_loader_args != {}:
         self.data_loader['valid'] = torch.utils.data.DataLoader(
             dataset=Feeder(**self.arg.valid_loader_args),
             batch_size=self.arg.test_batch_size,
             shuffle=False,
             drop_last=False,
             num_workers=self.arg.num_worker,
         )
     if self.arg.test_loader_args != {}:
         test_dataset = Feeder(**self.arg.test_loader_args)
         self.stat.test_size = len(test_dataset)
         self.data_loader['test'] = torch.utils.data.DataLoader(
             dataset=test_dataset,
             batch_size=self.arg.test_batch_size,
             shuffle=False,
             drop_last=False,
             num_workers=self.arg.num_worker,
         )
     print("Loading data finished.")
def parse_param_types(param_dict, parse_level=0, max_parse_level=0, soft=True):
    for param, param_val in param_dict.items():
        if param not in run_spec:
            if not soft:
                raise ValueError(
                    "Param '{}' is not in the run spec. Please add it to run_spec.yaml."
                    .format(param))
        else:
            param_spec = run_spec[param]
            if "type" in param_spec:
                if param_spec["type"] in ("Object", "runnable"):
                    param_dict[param] = eval(param_val)
                elif param_spec["type"] == "import":
                    param_dict[param] = import_class(param_val)
                elif isinstance(param_val, dict):
                    # Recursively parse nested dicts if not at the max specified
                    # depth (so library-specific specs don't have to be defined)
                    if parse_level < max_parse_level:
                        param_dict[param] = parse_param_types(
                            param_val, parse_level + 1, max_parse_level, soft)
                    # otherwise leave it as-is (we return below)
                else:
                    param_dict[param] = eval(param_spec["type"])(param_val)
            else:
                if not soft:
                    raise ValueError(
                        "No type specified for param '{}'. Please add it to run_spec.yaml."
                        .format(param))
    return param_dict
Example #9
    def load_data(self):
        Feeder = import_class(self.arg.feeder)
        self.data_loader = dict()
        train_feeder = Feeder(**self.arg.train_feeder_args)
        test_feeder = Feeder(**self.arg.test_feeder_args)
        if self.arg.mgpu:
            num_replicas = dist.get_world_size()
            rank = dist.get_rank()

            train_sampler = DistributedSampler(train_feeder,
                                               num_replicas=num_replicas,
                                               rank=rank)

            self.data_loader['train'] = torch.utils.data.DataLoader(
                train_feeder,
                batch_size=int(self.arg.batch_size / num_replicas),
                sampler=train_sampler,
                num_workers=self.arg.num_worker,
                drop_last=False,
                worker_init_fn=init_seed)
        else:
            self.data_loader['train'] = torch.utils.data.DataLoader(
                dataset=train_feeder,
                batch_size=self.arg.batch_size,
                shuffle=True,
                num_workers=self.arg.num_worker,
                drop_last=True,
                worker_init_fn=init_seed)
        self.data_loader['test'] = torch.utils.data.DataLoader(
            dataset=test_feeder,
            batch_size=self.arg.test_batch_size,
            shuffle=False,
            num_workers=self.arg.num_worker,
            drop_last=False,
            worker_init_fn=init_seed)
Example #10
    def get_protocol_factory(self):
        """Creates the instance of the protocol factory for a given COMaster"""

        protocol_factory = get_setting('POLL_PROTOCOL_FACTORY',
                                       'protocols.mara.client.MaraClientProtocolFactory')
        pf_class = import_class(protocol_factory)

        instance = pf_class(self)
        return instance
Example #11
def load_namespaces():
    namespace_module = getattr(django_settings, "NAMESPACE_MODULE", None)
    if namespace_module:
        try:
            return import_class(namespace_module).namespaces
        except Exception as ex:
            print(ex)
Example #12
def infer(conf_dict, args):
    """
    run predict
    """
    logging.info("start test process ...")
    if args.use_cuda:
        place = fluid.CUDAPlace(0)
    else:
        place = fluid.CPUPlace()

    with fluid.dygraph.guard(place):
        vocab = utils.load_vocab(args.vocab_path)
        simnet_process = reader.SimNetProcessor(args, vocab)
        get_infer_examples = simnet_process.get_infer_reader
        infer_loader = fluid.io.DataLoader.from_generator(
            capacity=16,
            return_list=True,
            iterable=True,
            use_double_buffer=True)
        infer_loader.set_sample_list_generator(
            paddle.batch(get_infer_examples, batch_size=args.batch_size),
            place)

        conf_dict['dict_size'] = len(vocab)
        conf_dict['seq_len'] = args.seq_len

        net = utils.import_class("./nets", conf_dict["net"]["module_name"],
                                 conf_dict["net"]["class_name"])(conf_dict)
        model, _ = load_dygraph(args.init_checkpoint)
        net.set_dict(model)

        pred_list = []
        if args.task_mode == "pairwise":
            for left, pos_right in infer_loader():
                left = fluid.layers.reshape(left, shape=[-1, 1])
                pos_right = fluid.layers.reshape(pos_right, shape=[-1, 1])

                left_feat, pos_score = net(left, pos_right)
                pred = pos_score
                pred_list += list(
                    map(lambda item: str((item[0] + 1) / 2), pred.numpy()))

        else:
            for left, right in infer_loader():
                left = fluid.layers.reshape(left, shape=[-1, 1])
                right = fluid.layers.reshape(right, shape=[-1, 1])
                left_feat, pred = net(left, right)
                pred_list += map(lambda item: str(np.argmax(item)),
                                 pred.numpy())

        with io.open(args.infer_result_path, "w",
                     encoding="utf8") as infer_file:
            for _data, _pred in zip(simnet_process.get_infer_data(),
                                    pred_list):
                infer_file.write(_data + "\t" + _pred + "\n")
        logging.info("infer result saved in %s" %
                     os.path.join(os.getcwd(), args.infer_result_path))
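Examples #12, #29, #30, #31, and #43 instead call a three-argument utils.import_class(package_path, module_name, class_name). A hedged sketch of what that variant might look like, assuming package_path is a directory to add to the import search path (the actual utils module is not shown):

import importlib
import sys

def import_class(package_path, module_name, class_name):
    # Make the directory importable, load module_name from it,
    # and return the class named class_name.
    if package_path not in sys.path:
        sys.path.insert(0, package_path)
    module = importlib.import_module(module_name)
    return getattr(module, class_name)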
Example #13
def load_detection_data_sampler(sampler_module):
    split = sampler_module.split('.')

    # The sampler module can only be like "projects.FashionNet.fashionnet.**"
    # or fall back to the default detectron2 samplers
    if len(split) > 1:
        sampler_meta = import_class(sampler_module)
        return sampler_meta

    return samplers
Example #14
    def __init__(self,
                 num_class,
                 num_point,
                 num_person,
                 num_gcn_scales,
                 num_g3d_scales,
                 graph,
                 in_channels=3):
        super(Model, self).__init__()

        Graph = import_class(graph)
        A_binary = Graph().A_binary

        self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point)

        # channels
        c1 = 96
        c2 = c1 * 2  # 192
        c3 = c2 * 2  # 384

        # r=3 STGC blocks
        self.gcn3d1 = MultiWindow_MS_G3D(3,
                                         c1,
                                         A_binary,
                                         num_g3d_scales,
                                         window_stride=1)
        self.sgcn1 = nn.Sequential(
            MS_GCN(num_gcn_scales, 3, c1, A_binary, disentangled_agg=True),
            MS_TCN(c1, c1), MS_TCN(c1, c1))
        self.sgcn1[-1].act = nn.Identity()
        self.tcn1 = MS_TCN(c1, c1)

        self.gcn3d2 = MultiWindow_MS_G3D(c1,
                                         c2,
                                         A_binary,
                                         num_g3d_scales,
                                         window_stride=2)
        self.sgcn2 = nn.Sequential(
            MS_GCN(num_gcn_scales, c1, c1, A_binary, disentangled_agg=True),
            MS_TCN(c1, c2, stride=2), MS_TCN(c2, c2))
        self.sgcn2[-1].act = nn.Identity()
        self.tcn2 = MS_TCN(c2, c2)

        self.gcn3d3 = MultiWindow_MS_G3D(c2,
                                         c3,
                                         A_binary,
                                         num_g3d_scales,
                                         window_stride=2)
        self.sgcn3 = nn.Sequential(
            MS_GCN(num_gcn_scales, c2, c2, A_binary, disentangled_agg=True),
            MS_TCN(c2, c3, stride=2), MS_TCN(c3, c3))
        self.sgcn3[-1].act = nn.Identity()
        self.tcn3 = MS_TCN(c3, c3)

        self.fc = nn.Linear(c3, num_class)
Example #15
 def load_model(self):
     Model = import_class(self.arg.model)
     model = Model(**self.arg.model_args)
     if self.arg.mgpu:
         self.model = DistributedDataParallel(
             model.cuda(),
             device_ids=[self.arg.local_rank],
             find_unused_parameters=True)
     else:
         self.model = nn.DataParallel(model).cuda()
     self.loss = nn.CrossEntropyLoss().cuda()
Example #16
    def runner(self):
        def parse_losses(losses):
            log_vars = OrderedDict()
            for loss_name, loss_value in losses.items():
                if isinstance(loss_value, torch.Tensor):
                    log_vars[loss_name] = loss_value.mean()
                elif isinstance(loss_value, list):
                    log_vars[loss_name] = sum(_loss.mean()
                                              for _loss in loss_value)
                else:
                    raise TypeError(
                        '{} is not a tensor or list of tensors'.format(
                            loss_name))

            loss = sum(_value for _key, _value in log_vars.items()
                       if 'loss' in _key)

            log_vars['loss'] = loss
            for name in log_vars:
                log_vars[name] = log_vars[name].item()

            return loss, log_vars

        def batch_processor(model, data, train_mode):
            losses = model(**data)
            # losses = model(data)
            loss, log_vars = parse_losses(losses)
            outputs = dict(loss=loss,
                           log_vars=log_vars,
                           num_samples=len(data['batchdata'].data))
            return outputs

        self.runner = Runner(self.model, batch_processor, self.optimizer,
                             self.arg.work_dir)
        optimizer_config = DistOptimizerHook(
            grad_clip=dict(max_norm=20, norm_type=2))
        if not "policy" in self.arg.policy:
            lr_config = dict(policy='step', step=self.arg.step)
        else:
            lr_config = dict(**self.arg.policy)
        checkpoint_config = dict(interval=5)
        log_config = dict(interval=20,
                          hooks=[
                              dict(type='TextLoggerHook'),
                              dict(type='TensorboardLoggerHook')
                          ])
        self.runner.register_training_hooks(lr_config, optimizer_config,
                                            checkpoint_config, log_config)
        self.runner.register_hook(DistSamplerSeedHook())
        Feeder = import_class(self.arg.feeder)
        self.runner.register_hook(
            DistEvalTopKAccuracyHook(Feeder(**self.arg.test_feeder_args),
                                     interval=self.arg.test_interval,
                                     k=(1, 5)))
Example #17
    def get_consumers(self):
        """
            Create consumers for the incoming and outgoing message
            queues, registering the callbacks of every message
            processor on each of them.
        """

        consumers = {}

        # import dynamically (use the import path in the settings file) all
        # message processor then create one instance for each of them
        mps = (import_class(mp) for mp in settings.MESSAGE_PROCESSORS)
        self.message_processors = [mp() for mp in mps]

        # Just log which message processors have been loaded
        mps = (mp.rsplit('.', 1)[1] for mp in settings.MESSAGE_PROCESSORS)
        self.logger.info('Loading message processors: %s' % ', '.join(mps))

        # Create the consumer for incoming messages and attach the callback
        # of each message processor
        queue = self.queues['incoming_messages']
        c = consumers['incoming_messages'] = Consumer(self.channel, queue)

        for mp in self.message_processors:
            c.register_callback(mp.handle_incoming_message)

        c.consume()

        # Create the consumer for outgoing messages and attach the callback
        # of each message processor,
        # then attach a router callback that is going to relay the message
        # to the proper transport queue
        queue = self.queues['outgoing_messages']
        c = consumers['outgoing_messages'] = Consumer(self.channel, queue)

        for mp in self.message_processors:
            c.register_callback(mp.handle_outgoing_message)

        c.register_callback(self.relay_message_to_transport)
        c.consume()

        # Create the consumer for the log messages and attach a callback
        # from the SMS router: all messages sent to this queue are going
        # to be logged in the router log
        consumers['logs'] = Consumer(self.channel, self.queues['logs'])
        consumers['logs'].register_callback(self.handle_log)
        consumers['logs'].consume()

        # Attach a fallback function to handle messages that kombu can't deliver
        queue = self.queues['undelivered_kombu_message']
        c = consumers['undelivered_kombu_messages'] = Consumer(
            self.channel, queue)
        c.register_callback(self.handle_undelivered_kombu_message)
        c.consume()
Example #18
    def get_context_data(self, *args, **kwargs):
        widget_classes = {}

        for widget_class_path in WIDGET_CLASSES:
            widget_class = import_class(widget_class_path)
            widget_class.META = widget_class._meta
            widget_classes[widget_class_path] = widget_class

        return {
            'widget_classes': widget_classes,
            'form': self.get_object(),
        }
Example #19
    def get_transports(self):
        """
            Return a dict of message transports instances as describes in
            the settings file
        """
        transports = {}
        for name, transport in settings.MESSAGE_TRANSPORTS.items():

            klass = import_class(transport['backend'])
            transports[name] = klass(name, 'send_messages',
                                     **transport.get('options', {}))
        return transports
Example #20
def _load_factory(factory_line, global_conf, **local_conf):
    model, cls = factory_line.split(':')
    cls = cls.split('.')
    if len(cls) > 1:
        func = cls[1]
    else:
        func = 'factory'
    model = '.'.join([model, cls[0]])
    middleware = import_class(model)
    func = getattr(middleware, func)
    if callable(func):
        return func(global_conf, **local_conf)
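Tracing _load_factory by hand with a hypothetical factory line (not taken from the source):

# factory_line = 'paste.app:Shell.make'
#   model, cls = 'paste.app', ['Shell', 'make']  ->  func = 'make'
#   model = 'paste.app.Shell'                        (dotted class path)
#   import_class('paste.app.Shell') imports the class, getattr picks
#   off 'make', and Shell.make(global_conf, **local_conf) is returned.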
Example #21
def import_response_intent(intent_name):
    """
    @params intent_name intent name from yaml
    @params sub_path place with intent files and response folders
     such as `_profile`.
    """

    intent_name, sub_path = get_intent_sub_path(intent_name)

    path = "raven.response"
    path = f"{path}.{sub_path}._{intent_name}"
    path = f"{path}.{intent_name}.{_title_case(intent_name)}"
    return import_class(path)
Example #22
    def load_model(self):
        output_device = self.arg.device[0] if type(
            self.arg.device) is list else self.arg.device
        self.output_device = output_device
        Model = import_class(self.arg.model)

        # Copy model file and main
        shutil.copy2(inspect.getfile(Model), self.arg.work_dir)
        shutil.copy2(os.path.join('.', __file__), self.arg.work_dir)

        self.model = Model(**self.arg.model_args).cuda(output_device)
        self.loss = nn.CrossEntropyLoss().cuda(output_device)
        self.print_log(
            f'Model total number of params: {count_params(self.model)}')

        if self.arg.weights:
            try:
                self.global_step = int(self.arg.weights[:-3].split('-')[-1])
            except:
                print('Cannot parse global_step from model weights filename')
                self.global_step = 0

            self.print_log(f'Loading weights from {self.arg.weights}')
            if '.pkl' in self.arg.weights:
                with open(self.arg.weights, 'rb') as f:
                    weights = pickle.load(f)
            else:
                weights = torch.load(self.arg.weights)

            weights = OrderedDict(
                [[k.split('module.')[-1],
                  v.cuda(output_device)] for k, v in weights.items()])

            for w in self.arg.ignore_weights:
                if weights.pop(w, None) is not None:
                    self.print_log(f'Successfully removed weights: {w}')
                else:
                    self.print_log(f'Could not remove weights: {w}')

            try:
                self.model.load_state_dict(weights)
            except:
                state = self.model.state_dict()
                diff = list(set(state.keys()).difference(set(weights.keys())))
                self.print_log('Can not find these weights:')
                for d in diff:
                    self.print_log('  ' + d)
                state.update(weights)
                self.model.load_state_dict(state)
Example #23
    def load_model(self):
        output_device = self.arg.device[0] if type(
            self.arg.device) is list else self.arg.device
        self.output_device = output_device
        Model = import_class(self.arg.model)

        # Copy model file and main
        shutil.copy2(inspect.getfile(Model), self.arg.work_dir)
        shutil.copy2(os.path.join('.', __file__), self.arg.work_dir)

        # cpu
        self.model = Model(**self.arg.model_args)
        # gpu
        # self.model = Model(**self.arg.model_args).cuda(output_device)
        # self.loss = nn.CrossEntropyLoss().cuda(output_device)
        self.print_log(
            f'Model total number of params: {count_params(self.model)}')
Example #24
 def Loading(self):
     self.device.set_device(self.arg.device)
     print("Loading model")
     if self.arg.model:
         model_class = import_class(self.arg.model)
         model = self.device.model_to_device(
             model_class(**self.arg.model_args))
         if self.arg.weights:
             try:
                 print("Loading pretrained model...")
                 state_dict = torch.load(self.arg.weights)
                 for w in self.arg.ignore_weights:
                     if state_dict.pop(w, None) is not None:
                         print('Successfully removed weights: {}.'.format(w))
                     else:
                         print('Could not remove weights: {}.'.format(w))
                 model.load_state_dict(state_dict, strict=True)
                 optimizer = Optimizer(model, self.arg.optimizer_args)
             except RuntimeError:
                 print("Loading from checkpoint...")
                 state_dict = torch.load(self.arg.weights)
                 self.rng.set_rng_state(state_dict['rng_state'])
                 self.arg.optimizer_args[
                     'start_epoch'] = state_dict["epoch"] + 1
                 self.recoder.print_log(
                     "Resuming from checkpoint: epoch {}".format(
                         self.arg.optimizer_args['start_epoch']))
                 model = self.device.load_weights(model, self.arg.weights,
                                                  self.arg.ignore_weights)
                 optimizer = Optimizer(model, self.arg.optimizer_args)
                 optimizer.optimizer.load_state_dict(
                     state_dict["optimizer_state_dict"])
                 optimizer.scheduler.load_state_dict(
                     state_dict["scheduler_state_dict"])
         else:
             optimizer = Optimizer(model, self.arg.optimizer_args)
     else:
         raise ValueError("No Models.")
     print("Loading model finished.")
     self.load_data()
     return model, optimizer
Example #25
    def get_form(self, form_class):
        pk = self.request.GET.get('pk', None)
        widget_class = self.request.GET.get('widget_class', None)

        if pk:
            self.object = Widget.objects.filter(pk=pk
                ).select_subclasses()[0]
        else:
            if widget_class not in WIDGET_CLASSES:
                return

            tab = Tab.objects.get(pk=self.request.GET['tab_id'])

            widget_class = import_class(widget_class)

            self.object = widget_class(tab=tab)

        rules_light.require(self.request.user, 'form_designer.form.update',
            self.object.tab.form)

        return self.object.configuration_form_instance(self.request)
Example #26
def _serve(settings):
    autoreload_setting = getattr(settings, "AUTORELOAD", django_settings.DEBUG)
    host = settings.HOST
    port = settings.PORT
    print()
    print('Listening on %s:%s' % (host, port))
    print('Autoreload: %s' % ('ON' if autoreload_setting else 'OFF'))
    print()

    data = {}

    keyfile = getattr(settings, "KEYFILE", None)
    certfile = getattr(settings, "CERTFILE", None)
    if keyfile and certfile:
        data.update(dict(keyfile=keyfile, certfile=certfile))

    app_module = getattr(settings, "APPLICATION", None)
    if not app_module:
        app = Application(settings.NAMESPACES)
    else:
        app = import_class(app_module)(settings.NAMESPACES)

    CrossOriginSocketIOServer((host, port), app, policy_server=False,
                              **data).serve_forever()
Example #27
    def load_data(self):
        Feeder = import_class(self.arg.feeder)
        self.data_loader = dict()

        def worker_seed_fn(worker_id):
            # give workers different seeds
            return init_seed(self.arg.seed + worker_id + 1)

        if self.arg.phase == 'train':
            self.data_loader['train'] = torch.utils.data.DataLoader(
                dataset=Feeder(**self.arg.train_feeder_args),
                batch_size=self.arg.batch_size,
                shuffle=True,
                num_workers=self.arg.num_worker,
                drop_last=True,
                worker_init_fn=worker_seed_fn)

        self.data_loader['test'] = torch.utils.data.DataLoader(
            dataset=Feeder(**self.arg.test_feeder_args),
            batch_size=self.arg.test_batch_size,
            shuffle=False,
            num_workers=self.arg.num_worker,
            drop_last=False,
            worker_init_fn=worker_seed_fn)
Example #28
    @_clear_and_retry_on_attribute_error
    def _record_binary_annotation(self, annotation):
        self.binary_annotations.append(annotation)

    @_clear_and_retry_on_attribute_error
    def get_annotations(self):
        return self.annotations

    @_clear_and_retry_on_attribute_error
    def get_binary_annotations(self):
        return self.binary_annotations

    @_clear_and_retry_on_attribute_error
    def set_rpc_name(self, name):
        self.rpc_name = name

    @_clear_and_retry_on_attribute_error
    def get_rpc_name(self):
        return self.rpc_name

    @classmethod
    def clear(cls):
        cls.zipkin_data = ZipkinData()
        cls.annotations = []
        cls.binary_annotations = []
        cls.rpc_name = None


default = import_class(settings.ZIPKIN_DATA_STORE_CLASS)()
Example #29
def infer(conf_dict, args):
    """
    run predict
    """
    if args.use_cuda:
        place = fluid.CUDAPlace(0)
    else:
        place = fluid.CPUPlace()
    exe = fluid.Executor(place)

    vocab = utils.load_vocab(args.vocab_path)
    simnet_process = reader.SimNetProcessor(args, vocab)

    startup_prog = fluid.Program()

    get_infer_examples = simnet_process.get_infer_reader
    batch_data = fluid.io.batch(get_infer_examples,
                                args.batch_size,
                                drop_last=False)

    test_prog = fluid.Program()

    conf_dict['dict_size'] = len(vocab)

    net = utils.import_class("../shared_modules/models/matching",
                             conf_dict["net"]["module_name"],
                             conf_dict["net"]["class_name"])(conf_dict)

    if args.task_mode == "pairwise":
        with fluid.program_guard(test_prog, startup_prog):
            with fluid.unique_name.guard():
                infer_loader, left, pos_right = create_model(args,
                                                             is_inference=True)
                left_feat, pos_score = net.predict(left, pos_right)
                pred = pos_score
        test_prog = test_prog.clone(for_test=True)
    else:
        with fluid.program_guard(test_prog, startup_prog):
            with fluid.unique_name.guard():
                infer_loader, left, right = create_model(args,
                                                         is_inference=True)
                left_feat, pred = net.predict(left, right)
        test_prog = test_prog.clone(for_test=True)

    exe.run(startup_prog)

    utils.init_checkpoint(exe, args.init_checkpoint, main_program=test_prog)

    test_exe = exe
    infer_loader.set_sample_list_generator(batch_data)

    logging.info("start test process ...")
    preds_list = []
    fetch_list = [pred.name]
    output = []
    infer_loader.start()
    while True:
        try:
            output = test_exe.run(program=test_prog, fetch_list=fetch_list)
            if args.task_mode == "pairwise":
                preds_list += list(
                    map(lambda item: str((item[0] + 1) / 2), output[0]))
            else:
                preds_list += map(lambda item: str(np.argmax(item)), output[0])
        except fluid.core.EOFException:
            infer_loader.reset()
            break
    with io.open(args.infer_result_path, "w", encoding="utf8") as infer_file:
        for _data, _pred in zip(simnet_process.get_infer_data(), preds_list):
            infer_file.write(_data + "\t" + _pred + "\n")
    logging.info("infer result saved in %s" %
                 os.path.join(os.getcwd(), args.infer_result_path))
Example #30
def test(conf_dict, args):
    """
    Evaluation Function
    """
    if args.use_cuda:
        place = fluid.CUDAPlace(0)
    else:
        place = fluid.CPUPlace()
    exe = fluid.Executor(place)

    vocab = utils.load_vocab(args.vocab_path)
    simnet_process = reader.SimNetProcessor(args, vocab)

    startup_prog = fluid.Program()

    get_test_examples = simnet_process.get_reader("test")
    batch_data = fluid.io.batch(get_test_examples,
                                args.batch_size,
                                drop_last=False)
    test_prog = fluid.Program()

    conf_dict['dict_size'] = len(vocab)

    net = utils.import_class("../shared_modules/models/matching",
                             conf_dict["net"]["module_name"],
                             conf_dict["net"]["class_name"])(conf_dict)

    metric = fluid.metrics.Auc(name="auc")

    with io.open("predictions.txt", "w", encoding="utf8") as predictions_file:
        if args.task_mode == "pairwise":
            with fluid.program_guard(test_prog, startup_prog):
                with fluid.unique_name.guard():
                    test_loader, left, pos_right = create_model(
                        args, is_inference=True)
                    left_feat, pos_score = net.predict(left, pos_right)
                    pred = pos_score
            test_prog = test_prog.clone(for_test=True)

        else:
            with fluid.program_guard(test_prog, startup_prog):
                with fluid.unique_name.guard():
                    test_loader, left, right = create_model(args,
                                                            is_inference=True)
                    left_feat, pred = net.predict(left, right)
            test_prog = test_prog.clone(for_test=True)

        exe.run(startup_prog)

        utils.init_checkpoint(exe,
                              args.init_checkpoint,
                              main_program=test_prog)

        test_exe = exe
        test_loader.set_sample_list_generator(batch_data)

        logging.info("start test process ...")
        test_loader.start()
        pred_list = []
        fetch_list = [pred.name]
        output = []
        while True:
            try:
                output = test_exe.run(program=test_prog, fetch_list=fetch_list)
                if args.task_mode == "pairwise":
                    pred_list += list(
                        map(lambda item: float(item[0]), output[0]))
                    predictions_file.write(u"\n".join(
                        map(lambda item: str((item[0] + 1) / 2), output[0])) +
                                           "\n")
                else:
                    pred_list += list(output[0])
                    predictions_file.write(u"\n".join(
                        map(lambda item: str(np.argmax(item)), output[0])) +
                                           "\n")
            except fluid.core.EOFException:
                test_loader.reset()
                break
        if args.task_mode == "pairwise":
            pred_list = np.array(pred_list).reshape((-1, 1))
            pred_list = (pred_list + 1) / 2
            pred_list = np.hstack(
                (np.ones_like(pred_list) - pred_list, pred_list))
        else:
            pred_list = np.array(pred_list)
        labels = simnet_process.get_test_label()

        metric.update(pred_list, labels)
        if args.compute_accuracy:
            acc = utils.get_accuracy(pred_list, labels, args.task_mode,
                                     args.lamda)
            logging.info("AUC of test is %f, Accuracy of test is %f" %
                         (metric.eval(), acc))
        else:
            logging.info("AUC of test is %f" % metric.eval())

    if args.verbose_result:
        utils.get_result_file(args)
        logging.info("test result saved in %s" %
                     os.path.join(os.getcwd(), args.test_result_path))
Example #31
def train(conf_dict, args):
    """
    train process
    """
    # loading vocabulary
    vocab = utils.load_vocab(args.vocab_path)
    # get vocab size
    conf_dict['dict_size'] = len(vocab)
    # Load network structure dynamically
    net = utils.import_class("../shared_modules/models/matching",
                             conf_dict["net"]["module_name"],
                             conf_dict["net"]["class_name"])(conf_dict)
    # Load loss function dynamically
    loss = utils.import_class("../shared_modules/models/matching/losses",
                              conf_dict["loss"]["module_name"],
                              conf_dict["loss"]["class_name"])(conf_dict)
    # Load Optimization method
    optimizer = utils.import_class(
        "../shared_modules/models/matching/optimizers", "paddle_optimizers",
        conf_dict["optimizer"]["class_name"])(conf_dict)
    # load auc method
    metric = fluid.metrics.Auc(name="auc")
    # Get device
    if args.use_cuda:
        place = fluid.CUDAPlace(0)
    else:
        place = fluid.CPUPlace()
    exe = fluid.Executor(place)
    startup_prog = fluid.Program()
    train_program = fluid.Program()

    # used for continuous evaluation
    if args.enable_ce:
        SEED = 102
        startup_prog.random_seed = SEED
        train_program.random_seed = SEED

    simnet_process = reader.SimNetProcessor(args, vocab)
    if args.task_mode == "pairwise":
        # Build network
        with fluid.program_guard(train_program, startup_prog):
            with fluid.unique_name.guard():
                train_loader, left, pos_right, neg_right = create_model(args)
                left_feat, pos_score = net.predict(left, pos_right)
                pred = pos_score
                _, neg_score = net.predict(left, neg_right)
                avg_cost = loss.compute(pos_score, neg_score)
                avg_cost.persistable = True
                optimizer.ops(avg_cost)

        # Get Reader
        get_train_examples = simnet_process.get_reader("train",
                                                       epoch=args.epoch)
        if args.do_valid:
            test_prog = fluid.Program()
            with fluid.program_guard(test_prog, startup_prog):
                with fluid.unique_name.guard():
                    test_loader, left, pos_right = create_model(
                        args, is_inference=True)
                    left_feat, pos_score = net.predict(left, pos_right)
                    pred = pos_score
            test_prog = test_prog.clone(for_test=True)

    else:
        # Build network
        with fluid.program_guard(train_program, startup_prog):
            with fluid.unique_name.guard():
                train_loader, left, right, label = create_model(
                    args, is_pointwise=True)
                left_feat, pred = net.predict(left, right)
                avg_cost = loss.compute(pred, label)
                avg_cost.persistable = True
                optimizer.ops(avg_cost)

        # Get Feeder and Reader
        get_train_examples = simnet_process.get_reader("train",
                                                       epoch=args.epoch)
        if args.do_valid:
            test_prog = fluid.Program()
            with fluid.program_guard(test_prog, startup_prog):
                with fluid.unique_name.guard():
                    test_loader, left, right = create_model(args,
                                                            is_inference=True)
                    left_feat, pred = net.predict(left, right)
            test_prog = test_prog.clone(for_test=True)

    if args.init_checkpoint != "":
        utils.init_checkpoint(exe, args.init_checkpoint, startup_prog)

    def valid_and_test(test_program, test_loader, get_valid_examples, process,
                       mode, exe, fetch_list):
        """
        return auc and acc
        """
        # Get Batch Data
        batch_data = fluid.io.batch(get_valid_examples,
                                    args.batch_size,
                                    drop_last=False)
        test_loader.set_sample_list_generator(batch_data)
        test_loader.start()
        pred_list = []
        while True:
            try:
                _pred = exe.run(program=test_program, fetch_list=[pred.name])
                pred_list += list(_pred)
            except fluid.core.EOFException:
                test_loader.reset()
                break
        pred_list = np.vstack(pred_list)
        if mode == "test":
            label_list = process.get_test_label()
        elif mode == "valid":
            label_list = process.get_valid_label()
        if args.task_mode == "pairwise":
            pred_list = (pred_list + 1) / 2
            pred_list = np.hstack(
                (np.ones_like(pred_list) - pred_list, pred_list))
        metric.reset()
        metric.update(pred_list, label_list)
        auc = metric.eval()
        if args.compute_accuracy:
            acc = utils.get_accuracy(pred_list, label_list, args.task_mode,
                                     args.lamda)
            return auc, acc
        else:
            return auc

    # run train
    logging.info("start train process ...")
    # set global step
    global_step = 0
    ce_info = []
    train_exe = exe
    #for epoch_id in range(args.epoch):
    # used for continuous evaluation
    if args.enable_ce:
        train_batch_data = fluid.io.batch(get_train_examples,
                                          args.batch_size,
                                          drop_last=False)
    else:
        train_batch_data = fluid.io.batch(fluid.io.shuffle(get_train_examples,
                                                           buf_size=10000),
                                          args.batch_size,
                                          drop_last=False)
    train_loader.set_sample_list_generator(train_batch_data)
    train_loader.start()
    exe.run(startup_prog)
    losses = []
    start_time = time.time()
    while True:
        try:
            global_step += 1
            fetch_list = [avg_cost.name]
            avg_loss = train_exe.run(program=train_program,
                                     fetch_list=fetch_list)
            losses.append(np.mean(avg_loss[0]))
            if args.do_valid and global_step % args.validation_steps == 0:
                get_valid_examples = simnet_process.get_reader("valid")
                valid_result = valid_and_test(test_prog, test_loader,
                                              get_valid_examples,
                                              simnet_process, "valid", exe,
                                              [pred.name])
                if args.compute_accuracy:
                    valid_auc, valid_acc = valid_result
                    logging.info(
                        "global_steps: %d, valid_auc: %f, valid_acc: %f, valid_loss: %f"
                        % (global_step, valid_auc, valid_acc, np.mean(losses)))
                else:
                    valid_auc = valid_result
                    logging.info(
                        "global_steps: %d, valid_auc: %f, valid_loss: %f" %
                        (global_step, valid_auc, np.mean(losses)))
            if global_step % args.save_steps == 0:
                model_save_dir = os.path.join(args.output_dir,
                                              conf_dict["model_path"])
                model_path = os.path.join(model_save_dir, str(global_step))

                if not os.path.exists(model_save_dir):
                    os.makedirs(model_save_dir)
                if args.task_mode == "pairwise":
                    feed_var_names = [left.name, pos_right.name]
                    target_vars = [left_feat, pos_score]
                else:
                    feed_var_names = [
                        left.name,
                        right.name,
                    ]
                    target_vars = [left_feat, pred]
                fluid.io.save_inference_model(model_path, feed_var_names,
                                              target_vars, exe, test_prog)
                logging.info("saving infer model in %s" % model_path)

        except fluid.core.EOFException:
            train_loader.reset()
            break
    end_time = time.time()
    #logging.info("epoch: %d, loss: %f, used time: %d sec" %
    #(epoch_id, np.mean(losses), end_time - start_time))
    ce_info.append([np.mean(losses), end_time - start_time])
    #final save
    logging.info("the final step is %s" % global_step)
    model_save_dir = os.path.join(args.output_dir, conf_dict["model_path"])
    model_path = os.path.join(model_save_dir, str(global_step))
    if not os.path.exists(model_save_dir):
        os.makedirs(model_save_dir)
    if args.task_mode == "pairwise":
        feed_var_names = [left.name, pos_right.name]
        target_vars = [left_feat, pos_score]
    else:
        feed_var_names = [
            left.name,
            right.name,
        ]
        target_vars = [left_feat, pred]
    fluid.io.save_inference_model(model_path, feed_var_names, target_vars, exe,
                                  test_prog)
    logging.info("saving infer model in %s" % model_path)
    # used for continuous evaluation
    if args.enable_ce:
        card_num = get_cards()
        ce_loss = 0
        ce_time = 0
        try:
            ce_loss = ce_info[-1][0]
            ce_time = ce_info[-1][1]
        except:
            logging.info("ce info err!")
        print("kpis\teach_step_duration_%s_card%s\t%s" %
              (args.task_name, card_num, ce_time))
        print("kpis\ttrain_loss_%s_card%s\t%f" %
              (args.task_name, card_num, ce_loss))

    if args.do_test:
        # Get Feeder and Reader (the test reader is the same for both task modes)
        get_test_examples = simnet_process.get_reader("test")
        test_result = valid_and_test(test_prog, test_loader, get_test_examples,
                                     simnet_process, "test", exe, [pred.name])
        if args.compute_accuracy:
            test_auc, test_acc = test_result
            logging.info("AUC of test is %f, Accuracy of test is %f" %
                         (test_auc, test_acc))
        else:
            test_auc = test_result
            logging.info("AUC of test is %f" % test_auc)
Example #32
 def __init__(self):
     self.host_manager = utils.import_class(SCHEDULER_HOST_MANAGER)()
Example #33
 def __get_object(self, path_to_class):
     class_name = self.__extract_class_name(path_to_class)
     module_path = self.__reconstruct_module_path(path_to_class)
     return utils.import_class(module_path, class_name)
Example #34
def get_install_handler_class(target_platform):
    return import_class('handlers.platforms.%s.InstallHandler' % target_platform)
Example #35
def get_package_parser_class(target_platform):
    return import_class('parsers.platforms.%s.CLIPackageParser' % target_platform)
Example #36
def get_package_parser_class(software_platform):
    return import_class('parsers.platforms.%s.CLIPackageParser' % software_platform)
Example #37
def get_install_handler_class(ctx):
    if ctx.host.family == UNKNOWN:
        discover_platform_info(ctx)

    return import_class('handlers.platforms.%s.InstallHandler' % ctx.host.software_platform)
Example #38
def get_inventory_handler_class(target_platform):
    return import_class('handlers.platforms.%s.InventoryHandler' % target_platform)
Example #39
 def widget_class(self):
     return import_class(self.widget_class_path)
Example #40
def get_install_handler_class(ctx):
    if ctx.host.family == UNKNOWN:
        discover_platform_info(ctx)

    return import_class('handlers.base.BaseInstallHandler')
Example #41
def get_schema_migrate_class(version):
    return import_class('schema.migrate_to_version_%s.SchemaMigrate' % version)
Example #42
 def field_class(self):
     return import_class(self.field_class_path)
Example #43
def train(conf_dict):
    """
    train process
    """
    # Get data layer
    data = layers.DataLayer()
    # Load network structure dynamically
    net = utils.import_class(
        "nets", conf_dict["net"]["module_name"], conf_dict["net"]["class_name"])(conf_dict)
    # Load loss function dynamically
    loss = utils.import_class(
        "losses", conf_dict["loss"]["module_name"], conf_dict["loss"]["class_name"])(conf_dict)
    # Load Optimization method
    optimizer = utils.import_class(
        "optimizers", "paddle_optimizers", conf_dict["optimizer"]["class_name"])(conf_dict)

    # Get service
    place = fluid.core.CPUPlace()
    if conf_dict["task_mode"] == "pairwise":
        # Build network
        left = data.ops(name="left", shape=[1], dtype="int64", lod_level=1)
        pos_right = data.ops(name="right", shape=[
                             1], dtype="int64", lod_level=1)
        neg_right = data.ops(name="neg_right", shape=[
                             1], dtype="int64", lod_level=1)
        left_feat, pos_score = net.predict(left, pos_right)
        _, neg_score = net.predict(left, neg_right)
        avg_cost = loss.compute(pos_score, neg_score)
        # Get Feeder and Reader
        feeder = fluid.DataFeeder(place=place, feed_list=[
                                  left.name, pos_right.name, neg_right.name])
        reader = data_reader.get_reader(conf_dict, False, None)
    else:
        # Build network
        left = data.ops(name="left", shape=[1], dtype="int64", lod_level=1)
        right = data.ops(name="right", shape=[1], dtype="int64", lod_level=1)
        label = data.ops(name="label", shape=[1], dtype="int64", lod_level=0)
        left_feat, pred = net.predict(left, right)
        avg_cost = loss.compute(pred, label)
        # Get Feeder and Reader
        feeder = fluid.DataFeeder(place=place, feed_list=[
                                  left.name, right.name, label.name])
        reader = data_reader.get_reader(conf_dict, False, None)
    # Save Infer model
    infer_program = fluid.default_main_program().clone()
    # operate Optimization
    optimizer.ops(avg_cost)
    # optimize memory 
    fluid.memory_optimize(fluid.default_main_program())
    executor = fluid.Executor(place)
    executor.run(fluid.default_startup_program())
    # Get and run executor
    parallel_executor = fluid.ParallelExecutor(
        use_cuda=False, loss_name=avg_cost.name,
        main_program=fluid.default_main_program())
    # Get device number
    device_count = parallel_executor.device_count
    logging.info("device count: %d" % device_count)
    # run train
    logging.info("start train process ...")
    for epoch_id in range(conf_dict["epoch_num"]):
        losses = []
        # Get batch data iterator
        batch_data = paddle.batch(reader, conf_dict["batch_size"], drop_last=False)
        start_time = time.time()
        for step, data in enumerate(batch_data()):
            if len(data) < device_count:
                continue
            avg_loss = parallel_executor.run(
                [avg_cost.name], feed=feeder.feed(data))
            print("epoch: %d, iter: %d, loss: %f" %
                  (epoch_id, step, np.mean(avg_loss[0])))
            losses.append(np.mean(avg_loss[0]))
        end_time = time.time()
        print("epoch: %d, loss: %f, used time: %d sec" %
              (epoch_id, np.mean(losses), end_time - start_time))
        model_save_dir = conf_dict["model_path"]
        model_path = os.path.join(model_save_dir, str(epoch_id))
        if not os.path.exists(model_save_dir):
            os.makedirs(model_save_dir)
        if conf_dict["task_mode"] == "pairwise":
            feed_var_names = [left.name, pos_right.name]
            target_vars = [left_feat, pos_score]
        else:
            feed_var_names = [left.name, right.name]
            target_vars = [left_feat, pred]
        fluid.io.save_inference_model(
            model_path, feed_var_names, target_vars, executor, infer_program)
Example #44
import random

from zipkin_data import ZipkinId
from utils import import_class
import defaults as settings


class BaseIdGenerator(object):
    def generate_trace_id(self):
        raise NotImplementedError

    def generate_span_id(self):
        raise NotImplementedError


class SimpleIdGenerator(BaseIdGenerator):
    @staticmethod
    def generate_id():
        return ZipkinId.from_binary(random.randrange(ZipkinId.MIN_VAL, ZipkinId.MAX_VAL))

    def generate_trace_id(self):
        return self.generate_id()

    def generate_span_id(self):
        return self.generate_id()


default = import_class(settings.ZIPKIN_ID_GENERATOR_CLASS)()
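In Examples #28, #44, and #46 the module ends by instantiating a configured class at import time via default = import_class(settings.SOME_CLASS)(). The setting is just a dotted-path string; a hypothetical entry in the defaults module for Example #44 might read:

# defaults.py (hypothetical value, not taken from the source)
ZIPKIN_ID_GENERATOR_CLASS = 'zipkin.id_generators.SimpleIdGenerator'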
Example #45
def get_connection_handler_class(ctx):
    return import_class('handlers.base.BaseConnectionHandler')
Example #46
    @_clear_and_retry_on_attribute_error
    def _record_binary_annotation(self, annotation):
        self.thread_local_data.binary_annotations.append(annotation)

    @_clear_and_retry_on_attribute_error
    def get_annotations(self):
        return self.thread_local_data.annotations

    @_clear_and_retry_on_attribute_error
    def get_binary_annotations(self):
        return self.thread_local_data.binary_annotations

    @_clear_and_retry_on_attribute_error
    def set_rpc_name(self, name):
        self.thread_local_data.rpc_name = name

    @_clear_and_retry_on_attribute_error
    def get_rpc_name(self):
        return self.thread_local_data.rpc_name

    @classmethod
    def clear(cls):
        cls.thread_local_data = threading.local()
        cls.thread_local_data.zipkin_data = ZipkinData()
        cls.thread_local_data.annotations = []
        cls.thread_local_data.binary_annotations = []
        cls.thread_local_data.rpc_name = None


default = import_class(settings.ZIPKIN_DATA_STORE_CLASS)()
Example #47
def get_connection_handler_class(target_platform):
    return import_class('handlers.platforms.%s.ConnectionHandler' % target_platform)
Example #48

webdialog_config = ConfigParser.ConfigParser()
try:
    webdialog_config.read("config.cfg")
except Exception:
    pass


#-------- set the dialog state object ----
import DialogState
dialog_state_class = DialogState.dialogState
if webdialog_config.has_option("webdialog", "dialog_state_class"):
    class_name = webdialog_config.get("webdialog", "dialog_state_class")
    try:
        dialog_state_class = utils.import_class(class_name)
    except Exception:
        print "unable to import dialogState", class_name
        traceback.print_exc()

#---------    Configure the server -----
web.config.debug = True
if webdialog_config.has_option("webdialog", "debug"):
    web.config.debug = (webdialog_config.has_option("webdialog", "debug"))
    
web.config.session_parameters['timeout'] = 86400*30  # thirty days
web.config.session_parameters['secret_key'] = '7a68f02c95dda0f81424d3ff68815151'
if webdialog_config.has_option("webdialog", "session_secret_key"):
    web.config.session_parameters['secret_key'] = webdialog_config.get("webdialog", "session_secret_key")