def main(args=None):
    if args is None:
        args = sys.argv[1:]
    args = _parse_args(args)
    conf = get_config(args.config)
    run(conf)

def action_apply(self):
    """
    Apply parameters to relevant .nif files
    """
    if self.nif_files_list_widget.count() == 0:
        QMessageBox.warning(self, "No .nif files loaded",
                            "Don't forget to load .nif files!")
        return

    if self.nif_files_list_widget.count() >= get_config().getint("DEFAULT", "softLimit"):
        box = QMessageBox()
        box.setIcon(QMessageBox.Question)
        box.setWindowTitle('Are you sure?')
        box.setText(
            "The tool may struggle with more than 100 .nif files at once. "
            "We advise you to process small batches.\n\n"
            "Are you sure you wish to continue?")
        box.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
        buttonY = box.button(QMessageBox.Yes)
        buttonY.setText('Yes')
        buttonN = box.button(QMessageBox.No)
        buttonN.setText('No')
        box.exec_()
        if box.clickedButton() == buttonN:
            return

    log.info("Applying parameters to " +
             str(self.nif_files_list_widget.count()) + " files ...")
    self.toggle(False)
    self.progress_bar.setValue(0)
    self.processed_files = itertools.count()

    CONFIG.set("NIF", "Glossiness", str(self.spin_box_glossiness.value()))
    CONFIG.set("NIF", "SpecularStrength",
               str(self.spin_box_specular_strength.value()))
    save_config()

    QMessageBox.warning(
        self, "Attention!",
        "The process is quite slow.\n\n"
        "The GUI will be mostly unresponsive to your input. Do not close the "
        "application unless the completion percentage has not updated for a "
        "long time (several minutes).\n"
        "For example, processing 100 files took 13 minutes.")

    # for indices in chunkify(range(self.nif_files_list_widget.count()),
    #                         QThreadPool.globalInstance().maxThreadCount() - 1):
    QThreadPool.globalInstance().setExpiryTimeout(-1)
    for index in range(self.nif_files_list_widget.count()):
        item = self.nif_files_list_widget.item(index)
        worker = NifProcessWorker(
            index=index,
            path=item.text(),
            keywords=self.keywords,
            glossiness=self.spin_box_glossiness.value(),
            specular_strength=self.spin_box_specular_strength.value())
        worker.signals.start.connect(self.start_apply_action)
        worker.signals.result.connect(self.result_apply_action)
        worker.signals.finished.connect(self.finish_apply_action)
        QThreadPool.globalInstance().start(worker)

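# Hedged sketch, not the project's actual implementation: the worker pattern used
# above generally pairs a QRunnable with a QObject that owns the Signals, so the
# GUI thread can receive start/result/finished notifications while the global
# QThreadPool runs the work. All names and signal signatures below are
# illustrative assumptions.
from PySide2.QtCore import QObject, QRunnable, Signal, Slot


class WorkerSignals(QObject):
    start = Signal(int)
    result = Signal(int, bool)
    finished = Signal(int)


class NifProcessWorkerSketch(QRunnable):
    def __init__(self, index, path, keywords, glossiness, specular_strength):
        super().__init__()
        self.index = index
        self.path = path
        self.keywords = keywords
        self.glossiness = glossiness
        self.specular_strength = specular_strength
        self.signals = WorkerSignals()

    @Slot()
    def run(self):
        # Announce the start, do the (placeholder) work, then report back.
        self.signals.start.emit(self.index)
        ok = True  # placeholder for the actual .nif processing
        self.signals.result.emit(self.index, ok)
        self.signals.finished.emit(self.index)
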
def get_serialized_pipeline(train):
    from src.features import counting_feat, knn_similarity
    config = get_config()
    feature_names = [
        file_name for file_name in listdir(config['features_dump_dir'])
    ]
    return Pipeline([
        ('read', ReadFeatures(feature_names)),
        ("train_search", knn_similarity.TrainSearch(train=train)),
        ('tfidf', counting_feat.BagOfTfIDF(train)),  # cb
        ('transform', ToMatrix(features=feature_names)),
        ('norm', MinMaxScaler())
    ])

def run(conf_path, finetune):
    conf = get_config(conf_path)

    # Create generators
    datasets = {
        x: CelebAGenerator.from_conf(conf, is_train=x == 'train')
        for x in ['train', 'val']
    }

    # Get new training directory
    save_folder = create_train_directory(conf.path.models_root)
    conf_filename = os.path.split(conf_path)[-1]
    save_full_ini(conf_path, os.path.join(save_folder, conf_filename))

    train_multilabel(datasets, save_folder, conf, finetune=finetune)

def bootstrap_config(config_id: str,
                     should_make_config_immutable: bool = True) -> ConfigType:
    """Prepare the config object

    Args:
        config_id (str): config_id to load
        should_make_config_immutable (bool, optional): Should the config object
            be immutable. Defaults to True.

    Returns:
        ConfigType: Config Object
    """
    config = get_config(
        config_id, should_make_config_immutable=should_make_config_immutable)
    write_debug_message(
        f"Starting Experiment at {time.asctime(time.localtime(time.time()))}")
    write_debug_message(f"torch version = {torch.__version__}")  # type: ignore
    set_seed(seed=config.general.seed)
    return config

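# Hedged usage sketch (not from the source): bootstrap_config is typically the
# first call of an experiment entrypoint. The config_id "sample_config" is a
# hypothetical value, and the seed attribute path follows the function above.
if __name__ == "__main__":
    config = bootstrap_config(config_id="sample_config",
                              should_make_config_immutable=True)
    write_debug_message(f"Running with seed = {config.general.seed}")
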
def get_serialized_pipeline(train):
    from src.features import counting_feat, knn_similarity
    config = get_config()
    black_list = [
        'polarity', 'subjectivity', 'sent_nrc', 'discourse_rel', 'discourse_it',
        'in_chunk_last_it', 'in_chunk_last_rel', 'in_chunk_first_it',
        'in_chunk_first_rel'
    ]
    read_feature_names = [
        file_name for file_name in listdir(config['features_dump_dir'])
        if file_name not in black_list
    ]
    all_feature_names = (read_feature_names +
                         counting_feat.BagOfTfIDFN.FEATS +
                         knn_similarity.TrainSearch.FEATS)
    print(all_feature_names)
    return Pipeline([
        ('read', ReadFeatures(read_feature_names)),
        ("train_search", knn_similarity.TrainSearch(train=train)),
        ('tfidf', counting_feat.BagOfTfIDFN(train)),  # cb
        ('transform', ToMatrix(features=all_feature_names)),
        ('norm', MinMaxScaler())
    ])

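# Hedged usage sketch (not from the source): the returned sklearn Pipeline is
# typically fitted on the training sentences and then applied to held-out data.
# `train_sentences` and `test_sentences` are hypothetical variables.
pipeline = get_serialized_pipeline(train_sentences)
X_train = pipeline.fit_transform(train_sentences)
X_test = pipeline.transform(test_sentences)
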
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from os.path import join

from src.data.debates import Debate
from src.data.debates import read_debates
from src.utils.config import get_config
from src.utils.timing import timing

CONFIG = get_config()


class ClaimBusterScraper(object):
    """
    Used for scraping the ClaimBuster engine's score for a given sentence.
    Selenium is used for the scraping.
    >>> cb = ClaimBusterScraper()
    >>> cb.get_score("That is wrong!")
    """
    CB_URL = "http://idir-server2.uta.edu/claimbuster/demo"

    def initialize(self):
        self.driver = webdriver.PhantomJS('../../phantomjs/bin/phantomjs')
        self.driver.get(self.CB_URL)

    def __init__(self):
        self.initialize()

    def get_score(self, text):
        """

""" Main HTTP server entrypoint. """ import time import sanic import jsonschema import traceback from jsonschema.exceptions import ValidationError from src.utils.config import get_config from src.utils.schemas import load_schemas from src.utils import re_api from src.exceptions import InvalidParams, REError _CONF = get_config() _SCHEMAS = load_schemas() app = sanic.Sanic() app.config.API_TITLE = 'Taxonomy RE API' app.config.API_DESCRIPTION = 'Taxonomy data API using the relation engine.' app.config.API_PRODUCES_CONTENT_TYPES = ['application/json'] def transform_taxon_results(taxa, ns, ns_config): """ Make some modifications on any taxon results given a namespace config (see _NS_CONFIG) Mutates each dict in the given `taxa` list """ for taxon in taxa: taxon['ns'] = ns
def __init__(self, feature_names):
    config = get_config()
    self._feature_dicts = {}
    for feature in feature_names:
        # Load each serialized feature dict from the features dump directory,
        # closing the file handle once it has been read.
        with open(config['features_dump_dir'] + feature) as feature_file:
            self._feature_dicts[feature] = json.load(feature_file)

def __init__(self):
    config = get_config("postgresql")
    self.connection = psycopg2.connect(**config)

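# Hedged sketch (not from the source): psycopg2.connect(**config) expects the
# config mapping to hold libpq connection keywords. The values below are
# illustrative assumptions about what get_config("postgresql") might return.
import psycopg2

config = {
    "host": "localhost",
    "port": 5432,
    "dbname": "app_db",
    "user": "app_user",
    "password": "secret",
}
connection = psycopg2.connect(**config)
with connection.cursor() as cursor:
    cursor.execute("SELECT version();")
    print(cursor.fetchone())
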
from pathlib import Path

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

from src.utils.config import get_config

sim_path = Path("F:\\KMC_data\\data_2021_03_23_v1\\11_7_7_random_0_a_0_1.0")
files_in_list = (sim_path / "heat_map").glob('*.dat')
config = get_config(sim_path / "input.kmc")

files_in_list = sorted(list(files_in_list), key=lambda i: float(i.stem))
last_data = pd.read_csv(files_in_list[0],
                        sep='\t',
                        names=['x', 'y', 'z', 'direction', 'count'])
diff_data = last_data  # NOTE: this binds diff_data to the same DataFrame object as last_data
jump_plot = []
for file_in in files_in_list[1:]:
    new_data = pd.read_csv(file_in,
                           sep='\t',
                           names=['x', 'y', 'z', 'direction', 'count'])
    diff_data['count'] = new_data['count'] - last_data['count']
    if file_in.stem == '19500':
        print()
    temp = diff_data[diff_data['direction'] == 0]

def __init__(self):
    self.db = Database()
    config = get_config("tesseract")
    pytesseract.pytesseract.tesseract_cmd = config["LOCATION_PATH"]

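# Hedged usage sketch (not from the source): once tesseract_cmd points at the
# binary taken from config["LOCATION_PATH"], OCR typically looks like this.
# The image path "scan.png" is a hypothetical example.
from PIL import Image
import pytesseract

text = pytesseract.image_to_string(Image.open("scan.png"))
print(text)
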
# -*- coding: utf-8 -*-
import sys
import logging

from PySide2.QtWidgets import QApplication

from src.pyqt.NifBatchTools.NifBatchTools import NifBatchTools
from src.utils.config import get_config

if __name__ == '__main__':
    try:
        logging.basicConfig(
            filemode="w",
            filename="htool.log",
            level=logging.getLevelName(get_config().get("LOG", "level")),
            format='%(asctime)s - [%(levelname)s] - %(name)s : %(message)s',
            datefmt='%m/%d/%Y %I:%M:%S %p')

        # getboolean() parses the "enabled" flag; a plain get() would return a
        # non-empty string and never disable the logger.
        if not get_config().getboolean("LOG", "enabled"):
            logger = logging.getLogger()
            logger.disabled = True

        logging.info(" =============== STARTING LOGGING ===============")
        logging.info("Log Level : " + get_config().get("LOG", "level"))

        app = QApplication(sys.argv)
        tool = NifBatchTools()
        tool.setAppStyle(app)
        tool.open()
        sys.exit(app.exec_())
    except SystemExit: