Beispiel #1
0
    def eliminar(self):
        """Delete the order given by the request argument, then redirect to its client's view."""
        # Load the order record for the id carried in ARG.
        self.model.pedido_id = int(ARG)
        self.model.select()

        # Resolve the owning client before the order row disappears.
        owner = Factory().make('Cliente', self.model.cliente)

        self.model.delete()
        redirect("cliente/ver", owner.cliente_id)
Beispiel #2
0
    def __init__(self,
                 fold,
                 conf,
                 data_conf,
                 cache_manager,
                 args,
                 inference=False,
                 verbose=True):
        """Set up per-fold output locations, helper objects and the torch cache dir.

        NOTE(review): `args` is presumably an argparse namespace providing
        outputfolder / pretrainedmodel / checkpointfile — confirm with caller.
        """
        self._fold = fold
        self._conf = conf
        self._data_conf = data_conf
        self._args = args
        self._inference = inference
        self._verbose = verbose
        self.tmp_dir = self._data_conf['tmp']

        join = os.path.join
        self._pretrained_model = None
        if self._inference:
            # Inference: all results (images, illuminant, GT, etc...) go
            # directly into the user-supplied output directory.
            self._results_dir = self._args.outputfolder
            ckpt = self._args.checkpointfile
            # A list of checkpoint files means one checkpoint per fold.
            self._checkpoint_file = ckpt[fold] if isinstance(ckpt, list) else ckpt
        else:
            # Training output layout:
            # output/<fold>/tensorboard/          tensorboard results
            # output/<fold>/results/              images, illuminant, GT, etc...
            # output/<fold>/checkpoint.pth.tar    resume point after failure
            # output/<fold>/model_best.pth.tar    best checkpoint, for inference
            output_dir = join(self._args.outputfolder, str(self._fold))
            self._tensorboard_dir = join(output_dir, 'tensorboard')
            self._results_dir = join(output_dir, 'results')
            self._best_checkpoint_file = join(output_dir, 'model_best.pth.tar')
            self._checkpoint_file = join(output_dir, 'checkpoint.pth.tar')
            self._pretrained_model = self._args.pretrainedmodel

            # create all directories
            os.makedirs(self._tensorboard_dir, exist_ok=True)

        self._display = Display(self._conf)
        self._factory = Factory(self._conf, self._data_conf, cache_manager,
                                self._args, verbose)
        self._cache_manager = cache_manager

        # create output directory
        os.makedirs(self._results_dir, exist_ok=True)

        # Keep torch's downloaded-model cache next to the package.
        os.environ['TORCH_HOME'] = join(
            os.path.dirname(os.path.realpath(__file__)), os.pardir,
            'torch_model_zoo')
Beispiel #3
0
    def __init__(self, config_filename):
        """Load config, word embedding, demo dataset and a trained model for inference."""
        # configuration
        cfg = Config()
        cfg.update_config("{}/{}".format(cfg.config_dir, config_filename))

        # word embedding
        print("setting word embedding...")
        embedding = Embedding()
        embedding_file = "{}/word_embedding_{}.pkl".format(
            cfg.cache_dir, cfg.config_name)
        print("loading word embedding from {}...".format(embedding_file))
        embedding.load_word_embedding(embedding_file)

        # demo dataset
        print("setting demo dataset...")
        self.demo_dataset = Dataset(cfg.data_config)
        self.demo_dataset.set_word_to_index(embedding.word2index)

        mapping_file = "{}/label_mapping_{}.pkl".format(
            cfg.cache_dir, cfg.config_name)
        print("loading label mapping from {}...".format(mapping_file))
        self.demo_dataset.load_label_mapping(mapping_file)

        # model — sizes are only known once embedding and dataset are loaded
        cfg.update_model_config({
            "vocab_size": embedding.vocab_size,
            "word_dim": embedding.word_dim,
            "document_length": self.demo_dataset.document_length,
            "sentence_length": self.demo_dataset.sentence_length,
            "num_labels": self.demo_dataset.num_labels
        })
        model = Model(cfg.model_config)

        # model factory
        self.network = Factory(model)
        self.network.set_test_module()
        print("number of GPUs: {}".format(self.network.num_gpus))
        print("device: {}".format(self.network.device))

        # load model
        model_file = "{}/model_{}.pkl".format(cfg.cache_dir, cfg.config_name)
        print("loading model from {}...".format(model_file))
        self.network.load_model(model_file)

        self.network.model_to_device()
        self.network.eval_mode()
    def test_24_hours_shifts(self):
        """Run the factory for a full day covered by three 8-hour worker shifts.

        The machine takes 1 hour to load and 1 hour to produce, i.e. it
        yields 2 items every 2 hours once started.
        """
        machine, spec, stock = create_machine(stocking_zone_size=None)
        factory = Factory()
        # Three consecutive 8-hour shifts (in minutes) cover the 24-hour day.
        factory.add_worker(Worker(working_hour=8 * 60))
        factory.add_worker(Worker(working_hour=8 * 60))
        factory.add_worker(Worker(working_hour=8 * 60))
        factory.add_production_unit(machine)
        factory.run(24 * 60)

        # One minute is lost starting the machine.
        # assertEquals is a deprecated unittest alias; use assertEqual.
        self.assertEqual(stock.count(), 720 - 1)
Beispiel #5
0
def get_factory(yaml_conf):
    """Build a Factory from a YAML configuration.

    The document must provide "name" and "production_units"; "workers" is
    optional.  Each production unit may set "rate" (items per minute,
    defaults to 1); each worker may set "working_hour" in hours
    (defaults to 8).
    """
    # NOTE(review): local renamed from `yaml` — the old name shadowed the
    # module that `load` presumably comes from.
    doc = load(yaml_conf)
    factory = Factory(name=doc["name"])
    materials = create_materials(doc)

    for unit_conf in doc["production_units"]:
        spec = create_spec(materials, unit_conf)
        config = {"rate_by_minute": unit_conf.get("rate", 1)}
        factory.add_production_unit(
            ProductionUnit(spec=spec,
                           config=config,
                           name=unit_conf["name"]))

    for worker_conf in doc.get("workers", []):
        # Configured in hours; the simulation clock runs in minutes.
        working_hour = worker_conf.get("working_hour", 8) * 60
        factory.add_worker(Worker(working_hour=working_hour))
    return factory
Beispiel #6
0
    def ver(self, pedido, denominacion):
        """Render the order detail page for *pedido* and print it as an HTML response."""
        # Per-line subtotal (quantity * unit price), accumulated for the order total.
        productos = pedido.producto_collection
        subtotales = []
        for producto in productos:
            producto.subtotal = producto.fm * producto.precio
            subtotales.append(producto.subtotal)

        archivo = '{}/pedido_ver.html'.format(STATIC_PATH)
        contenido = Template(archivo).render_dict(productos, tag="filapepro")

        cliente = Factory().make('Cliente', pedido.cliente)
        # Merge order fields, the client's address fields and computed values
        # into a single template context.
        contexto = vars(pedido)
        contexto.update(vars(cliente.domicilio))
        contexto['denominacion'] = denominacion
        contexto['total'] = sum(subtotales)
        contenido = Template(base=contenido).render(contexto)

        print(HTTP_HTML, "\n")
        print(Template(TEMPLATE_PATH).render_inner(contenido))
Beispiel #7
0
 def test_factory_add_worker(self):
     """Adding a worker registers it on the factory."""
     factory = Factory()
     factory.add_worker(Worker())
     # assertEquals is a deprecated unittest alias; use assertEqual.
     self.assertEqual(len(factory.workers), 1)
Beispiel #8
0
 def test_factory_is_aware_of_time(self):
     """A default run advances the factory clock by exactly one tick."""
     plant = Factory()
     plant.run()
     assert_that(plant.time, is_(1))
Beispiel #9
0
 def test_factory_add_production_unit(self):
     """Adding a production unit registers it on the factory."""
     factory = Factory()
     factory.add_production_unit(ProductionUnit(None))
     # assertEquals is a deprecated unittest alias; use assertEqual.
     self.assertEqual(len(factory.production_units), 1)
Beispiel #10
0
def test(config_filename):
    """Evaluate the trained model named by *config_filename* on the test set.

    Loads config, cached word embedding and label mapping, runs the saved
    model over the test data and logs accuracy / macro P / R / F metrics.

    FIX: `np.int` was removed in NumPy 1.24; it was an alias of the builtin
    `int`, so `int` is used directly (identical dtype, no behavior change).
    """
    # configuration
    config = Config()
    config_file = "{}/{}".format(config.config_dir, config_filename)
    config.update_config(config_file)

    # logger
    log_file = "{}/test_{}.txt".format(config.log_dir, config.config_name)
    logger = Logger(log_file)

    # word embedding
    logger.info("setting word embedding...")
    word_embedding = Embedding()

    word_embedding_file = "{}/word_embedding_{}.pkl".format(
        config.cache_dir, config.config_name)
    logger.info(
        "loading word embedding from {}...".format(word_embedding_file))
    word_embedding.load_word_embedding(word_embedding_file)

    logger.info("vocab_size: {}".format(word_embedding.vocab_size))
    logger.info("word_dim  : {}".format(word_embedding.word_dim))

    # testing dataset
    logger.info("setting testing dataset...")
    test_dataset = Dataset(config.data_config)

    test_dataset.set_word_to_index(word_embedding.word2index)

    label_mapping_file = "{}/label_mapping_{}.pkl".format(
        config.cache_dir, config.config_name)
    logger.info("loading label mapping from {}...".format(label_mapping_file))
    test_dataset.load_label_mapping(label_mapping_file)

    test_data_file = "{}/{}".format(config.data_dir, config.test_data_file)
    logger.info("loading data from {}...".format(test_data_file))
    test_dataset.load_data_from_file(test_data_file)
    logger.info("number of samples: {}".format(test_dataset.num_samples))

    logger.info("processing data...")
    test_dataset.process_data_from_file()

    # model — sizes are only known once embedding and dataset are loaded
    new_model_config = {
        "vocab_size": word_embedding.vocab_size,
        "word_dim": word_embedding.word_dim,
        "document_length": test_dataset.document_length,
        "sentence_length": test_dataset.sentence_length,
        "num_labels": test_dataset.num_labels
    }
    config.update_model_config(new_model_config)

    model = Model(config.model_config)

    # metric
    metric = Metric()

    # test configuration
    logger.info("configuration: {}".format(config))

    # data loader — no shuffling so predictions stay aligned with labels
    test_data_loader = DataLoader(test_dataset,
                                  batch_size=config.batch_size,
                                  shuffle=False)

    # model factory
    network = Factory(model)
    network.set_test_module()
    logger.info("number of GPUs: {}".format(network.num_gpus))
    logger.info("device: {}".format(network.device))

    # load model
    model_file = "{}/model_{}.pkl".format(config.cache_dir, config.config_name)
    logger.info("loading model from {}...".format(model_file))
    network.load_model(model_file)

    network.model_to_device()

    # test: accumulate per-batch predictions/labels into (num_samples, num_labels)
    network.eval_mode()
    test_preds = np.zeros([0, test_dataset.num_labels], dtype=int)
    test_labels = np.zeros([0, test_dataset.num_labels], dtype=int)
    for batch, data in enumerate(test_data_loader):
        sequences_ttl, sequences_cnt, labels = data
        preds = network.test(sequences_ttl, sequences_cnt)
        test_preds = np.concatenate((test_preds, preds), axis=0)
        test_labels = np.concatenate(
            (test_labels, labels.numpy().astype(int)), axis=0)

    # metrics
    ac, mp, mr, mf = metric.all_metrics(test_preds, test_labels)
    logger.info("Acc: {:.4f}".format(ac))
    logger.info("MP : {:.4f}".format(mp))
    logger.info("MR : {:.4f}".format(mr))
    logger.info("MF : {:.4f}".format(mf))
Beispiel #11
0
def train(config_filename):
    """Train the model named by *config_filename* with validation-based early stop.

    Builds (or loads cached) word embedding, prepares training/validation
    datasets, then trains for up to `config.num_epochs` epochs, saving the
    model whenever the validation macro-F improves and stopping after
    `config.early_stop` epochs without improvement.

    FIX: `np.int` was removed in NumPy 1.24; it was an alias of the builtin
    `int`, so `int` is used directly (identical dtype, no behavior change).
    """
    # configuration
    config = Config()
    config_file = "{}/{}".format(config.config_dir, config_filename)
    config.update_config(config_file)

    # logger
    log_file = "{}/train_{}.txt".format(config.log_dir, config.config_name)
    logger = Logger(log_file)

    # word embedding
    logger.info("setting word embedding...")
    word_embedding = Embedding()

    train_data_file = "{}/{}".format(config.data_dir, config.train_data_file)
    word_vector_file = "{}/{}".format(config.src_dir, config.word_vector_file)
    vocab_list_file = "{}/vocab_list_{}.txt".format(config.cache_dir,
                                                    config.config_name)
    word_embedding_file = "{}/word_embedding_{}.pkl".format(
        config.cache_dir, config.config_name)

    # build the embedding only once; later runs reuse the cached pickle
    if not os.path.exists(word_embedding_file):
        logger.info("building word embedding...")
        word_embedding.build_word_embedding(train_data_file, word_vector_file,
                                            vocab_list_file,
                                            word_embedding_file)

    logger.info(
        "loading word embedding from {}...".format(word_embedding_file))
    word_embedding.load_word_embedding(word_embedding_file)

    logger.info("vocab_size: {}".format(word_embedding.vocab_size))
    logger.info("word_dim  : {}".format(word_embedding.word_dim))

    # training dataset
    logger.info("setting training dataset...")
    train_dataset = Dataset(config.data_config)

    train_dataset.set_word_to_index(word_embedding.word2index)

    train_data_file = "{}/{}".format(config.data_dir, config.train_data_file)
    logger.info("loading data from {}...".format(train_data_file))
    train_dataset.load_data_from_file(train_data_file)
    logger.info("number of samples: {}".format(train_dataset.num_samples))

    label_list_file = "{}/label_list_{}.txt".format(config.cache_dir,
                                                    config.config_name)
    label_mapping_file = "{}/label_mapping_{}.pkl".format(
        config.cache_dir, config.config_name)
    logger.info("building label mapping...")
    train_dataset.build_label_mapping(label_list_file, label_mapping_file)

    logger.info("processing data...")
    train_dataset.process_data_from_file()

    # validation dataset — reuses the label mapping built from training data
    logger.info("setting validation dataset...")
    valid_dataset = Dataset(config.data_config)

    valid_dataset.set_word_to_index(word_embedding.word2index)

    label_mapping_file = "{}/label_mapping_{}.pkl".format(
        config.cache_dir, config.config_name)
    logger.info("loading label mapping from {}...".format(label_mapping_file))
    valid_dataset.load_label_mapping(label_mapping_file)

    valid_data_file = "{}/{}".format(config.data_dir, config.valid_data_file)
    logger.info("loading data from {}...".format(valid_data_file))
    valid_dataset.load_data_from_file(valid_data_file)
    logger.info("number of samples: {}".format(valid_dataset.num_samples))

    logger.info("processing data...")
    valid_dataset.process_data_from_file()

    # model — sizes are only known once embedding and dataset are loaded
    new_model_config = {
        "vocab_size": word_embedding.vocab_size,
        "word_dim": word_embedding.word_dim,
        "document_length": train_dataset.document_length,
        "sentence_length": train_dataset.sentence_length,
        "num_labels": train_dataset.num_labels
    }
    config.update_model_config(new_model_config)

    model = Model(config.model_config)

    # metric
    metric = Metric()

    # train configuration
    logger.info("configuration: {}".format(config))

    # data loader — shuffle only the training split
    train_data_loader = DataLoader(train_dataset,
                                   batch_size=config.batch_size,
                                   shuffle=True)
    valid_data_loader = DataLoader(valid_dataset,
                                   batch_size=config.batch_size,
                                   shuffle=False)

    # model factory
    network = Factory(model)
    network.set_train_module()
    logger.info("number of GPUs: {}".format(network.num_gpus))
    logger.info("device: {}".format(network.device))

    # set word embedding
    network.set_word_embedding(word_embedding.matrix)

    network.model_to_device()

    # train and validate
    max_mf = 0          # best validation macro-F seen so far
    epoch_count = 0     # epochs since the last improvement
    for epoch in range(config.num_epochs):
        logger.info("----------------------------------------")

        # train
        network.train_mode()
        for batch, data in enumerate(train_data_loader):
            sequences_ttl, sequences_cnt, labels = data
            loss = network.train(sequences_ttl, sequences_cnt, labels)
            if batch > 0 and batch % config.info_interval == 0:
                logger.info("epoch: {} | batch: {} | loss: {:.6f}".format(
                    epoch, batch, loss))

        # validate: accumulate predictions/labels into (num_samples, num_labels)
        network.eval_mode()
        valid_preds = np.zeros([0, valid_dataset.num_labels], dtype=int)
        valid_labels = np.zeros([0, valid_dataset.num_labels], dtype=int)
        for batch, data in enumerate(valid_data_loader):
            sequences_ttl, sequences_cnt, labels = data
            preds, loss = network.validate(sequences_ttl, sequences_cnt,
                                           labels)
            valid_preds = np.concatenate((valid_preds, preds), axis=0)
            valid_labels = np.concatenate(
                (valid_labels, labels.numpy().astype(int)), axis=0)

        # metrics
        ac, mp, mr, mf = metric.all_metrics(valid_preds, valid_labels)
        logger.info("Acc: {:.4f}".format(ac))
        logger.info("MP : {:.4f}".format(mp))
        logger.info("MR : {:.4f}".format(mr))
        logger.info("MF : {:.4f}".format(mf))

        # early stop: save on improvement, otherwise count stale epochs
        if mf >= max_mf:
            max_mf = mf
            epoch_count = 0
            model_file = "{}/model_{}.pkl".format(config.cache_dir,
                                                  config.config_name)
            logger.info("saving model to {}...".format(model_file))
            network.save_model(model_file)
        else:
            epoch_count += 1
            if epoch_count == config.early_stop:
                logger.info("stop training process.")
                logger.info("best epoch: {}".format(epoch - epoch_count))
                break