def csv_path(file_name):
    root_folder = os.path.join(get_root(),
                               AccentDataLoader.DATASET_DIR,
                               AccentDataLoader.TRAINING_FILES_DIR)
    verify_folder(root_folder)
    return os.path.join(root_folder, file_name)
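
if __name__ == "__main__":
    # Minimal usage sketch: "bio_metadata.csv" is a hypothetical file name, not
    # taken from the source. csv_path calls verify_folder on the directory but
    # does not check that the file itself exists.
    print(csv_path("bio_metadata.csv"))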
def copy_context(self):
    model_type_path = os.path.join(get_root(), "saved_models",
                                   self.config.exp.name, self.experiment_id)
    verify_folder(model_type_path)
    # copying CSV file
    csv_name = self.config.data_loader.data_file
    path_from = os.path.join(get_root(), "datasets/training_files", csv_name)
    path_to = os.path.join(model_type_path, csv_name)
    shutil.copy(path_from, path_to)
    # dumping the config
    json_config = json.dumps(self.config.toDict(), indent=4)
    path_to = os.path.join(model_type_path, "config.json")
    with open(path_to, "w") as f:
        f.write(json_config)
def load_model(self, model_type, model_num):
    # set the bucket path
    model_path = "/".join((self._models_folder(), model_type, model_num))
    # set the local path
    dest_path = os.path.join(get_root(), "saved_models", model_type, model_num)
    # get the file
    self.client.download_file(model_path, "model.h5", dest_path)
    return os.path.join(dest_path, "model.h5")
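
# Sketch of consuming the downloaded artifact: `storage` stands for an
# initialized instance of the class defining load_model above, and loading the
# file with Keras is an assumption based on the ".h5" name.
def fetch_and_load(storage, model_type, model_num):
    from keras.models import load_model as keras_load_model
    local_path = storage.load_model(model_type, model_num)
    return keras_load_model(local_path)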
def load_reference_data():
    """Load and cache reference data"""
    reference_file = os.path.join(get_root(), config.REFERENCE_DATA_FILE)
    updated_at = datetime.datetime.fromtimestamp(os.path.getmtime(reference_file))
    # Drop the cached copy if the file was modified after it was last cached
    if DATA.get("reference_updated_at", updated_at) < updated_at:
        DATA.pop("reference_data", None)
        DATA.pop("reference_updated_at", None)
    if "reference_data" not in DATA:
        DATA["reference_data"] = pd.read_csv(reference_file)
        DATA["reference_updated_at"] = updated_at
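
if __name__ == "__main__":
    # Usage sketch: DATA and config are this module's globals as above. Repeated
    # calls reuse the in-memory DataFrame until the file's mtime moves forward.
    load_reference_data()
    print(DATA["reference_data"].shape, DATA["reference_updated_at"])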
def setup_logger() -> dict:
    """Logger setup configuration. Returns logger configuration"""
    # Get log directory path
    log_root = get_root()
    # Load logger configuration (safe_load avoids executing arbitrary YAML tags)
    with open(os.path.join(log_root, 'log', 'logging.yaml'), 'rt') as log_conf:
        log_config = yaml.safe_load(log_conf)
    # Get handlers and loggers from configuration
    handlers = log_config.pop('handlers', {})
    loggers = log_config.get('loggers', {})
    new_handlers = {}
    # Setup files location and handler for each logger defined
    for logger_name, logger_config in loggers.items():
        new_handler_names = []
        for handler in logger_config.pop('handlers'):
            handler_conf = deepcopy(handlers[handler])
            log_dir = handler_conf.pop('log_dir', None)
            log_dir = log_dir if log_dir else 'applog'
            log_dir = os.path.join(log_root, log_dir, logger_name)
            log_file = handler_conf.get('filename')
            if log_file:
                log_file = os.path.join(log_dir, log_file)
                # Create log directory if not exists
                os.makedirs(os.path.dirname(log_file), exist_ok=True)
                handler_conf['filename'] = log_file
            new_name = '{}_{}'.format(handler, logger_name)
            new_handler_names.append(new_name)
            new_handlers[new_name] = handler_conf
        logger_config['handlers'] = new_handler_names
    log_config['handlers'] = new_handlers
    # Return logger configuration
    return log_config
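
if __name__ == "__main__":
    # Minimal sketch: feed the returned dict into the stdlib dictConfig
    # (assumes log/logging.yaml yields a valid dictConfig schema, version key
    # included). The logger name "training" is an assumption; use any logger
    # declared in the YAML file.
    import logging.config
    logging.config.dictConfig(setup_logger())
    logging.getLogger("training").info("logging configured")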
def save_model(self):
    if not self.experiment_id:
        import datetime
        self.experiment_id = datetime.datetime.now().strftime(
            "%Y-%m-%d_%H-%M-%S")
    model_type_path = os.path.join(get_root(), "saved_models",
                                   self.config.exp.name, self.experiment_id)
    verify_folder(model_type_path)
    name = "model.h5"
    model_path = os.path.join(model_type_path, name)
    self.model.save(model_path)
    self.copy_context()
    self.cloud_upload(model_type_path, self.config.exp.name, self.experiment_id)
def __init__(self, csv_filepath, destination_folder="", wait=1.5, debug=False):
    '''
    Initializes GetAudio class object
    :param csv_filepath (str): Path of the CSV file listing the audio clips to fetch
    :param destination_folder (str): Folder where audio files will be saved
    :param wait (float): Length (in seconds) between web requests
    :param debug (bool): Outputs status indicators to console when True
    '''
    self.csv_filepath = csv_filepath
    self.audio_df = pd.read_csv(csv_filepath)
    self.url = 'http://chnm.gmu.edu/accent/soundtracks/{}.mp3'
    if not destination_folder:
        self.destination_folder = os.path.join(get_root(), "datasets", "audio")
    else:
        self.destination_folder = destination_folder
    self.wait = wait
    self.debug = debug
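
if __name__ == "__main__":
    # Construction sketch: the CSV name is hypothetical. With no
    # destination_folder given, downloads default to <root>/datasets/audio.
    getter = GetAudio("bio_metadata.csv", wait=2.0, debug=True)
    print(getter.destination_folder)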
import argparse
import time

from watchdog.observers import Observer

# Command Line Argument parser
parser = argparse.ArgumentParser(description='Command Line Argument parser')
parser.add_argument('--host', default="localhost", type=str,
                    help='Application host')
parser.add_argument('-p', '--port', default=8000, type=int,
                    help='Application port')
args = parser.parse_args()

observer = Observer()
# Schedule the watcher
observer.schedule(DataWatchHandler(host=args.host, port=args.port),
                  path=os.path.join(get_root(), "data"))
# Starting data file watcher
observer.start()
try:
    while True:
        time.sleep(1)
except KeyboardInterrupt:
    observer.stop()
observer.join()
import os
import urllib.request

import flask
from pydub import AudioSegment

# initialize our Flask application and the Keras model
from utils.dirs import verify_folder
from utils.sound import SoundUtils
from utils.utils import get_root
from webserver.loader import predict_class_audio, MODEL_TYPE, MODEL_NUM
from webserver.storage.factory import StorageFactory

app = flask.Flask(__name__)
app.config['UPLOAD_FOLDER'] = os.path.join(get_root(), "webserver", "uploads")
verify_folder(app.config['UPLOAD_FOLDER'])


def convert_mp3(current_file):
    # target file name
    dst = current_file + ".wav"
    # convert mp3 to wav
    sound = AudioSegment.from_mp3(current_file)
    sound.export(dst, format="wav")
    return dst
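
if __name__ == "__main__":
    # Dev-server sketch, not taken from the source: host, port, and debug are
    # assumptions (the port mirrors the CLI default used elsewhere in this repo).
    app.run(host="0.0.0.0", port=8000, debug=True)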