示例#1
0
    def create_saving_dirs(self):
        """Append the run hyperparameters to the model name, then create
        the metrics and model output directories for this run."""
        suffix = '_MReg%.2fEReg%.2fB%d' % (self.args.magR, self.args.enR,
                                           self.args.BS)
        self.name = self.name + suffix

        for base_dir in (self.args.metrics_dir, self.args.model_dir):
            create_directory('%s/%s' % (base_dir, self.name))
示例#2
0
def export_software_profiles():
    """Export the selected software profiles as a JSON file attachment."""
    db_session = DBSession()

    # The requested profile names arrive as a single comma-separated value.
    requested_names = request.args.getlist(
        'software_profile_list[]')[0].split(",")

    export_data = {"CSM Server:Software Profile": {}}
    for profile in get_software_profile_list(db_session):
        if profile.name in requested_names:
            export_data["CSM Server:Software Profile"][profile.name] = \
                profile.packages

    temp_user_dir = create_temp_user_directory(current_user.username)
    export_dir = os.path.normpath(
        os.path.join(temp_user_dir, "software_profile_export"))
    create_directory(export_dir)
    make_file_writable(export_dir)

    export_file_path = os.path.join(export_dir, 'software_profiles.json')
    with open(export_file_path, 'w') as command_export_file:
        command_export_file.write(json.dumps(export_data, indent=2))

    return send_file(export_file_path, as_attachment=True)
示例#3
0
文件: log.py 项目: smjurcak/csm
def download_system_logs():
    """Dump all Log rows (newest first) to a per-user temp file and send
    it to the client as an attachment."""
    db_session = DBSession()
    logs = db_session.query(Log) \
        .order_by(Log.created_time.desc())

    # Collect pieces and join once instead of quadratic string +=.
    parts = []
    for log in logs:
        parts.append(get_datetime_string(log.created_time) + ' UTC\n')
        parts.append(log.level + ':' + log.msg + '\n')
        if log.trace is not None:
            parts.append(log.trace + '\n')
        parts.append('-' * 70 + '\n')
    contents = ''.join(parts)

    temp_user_dir = create_temp_user_directory(current_user.username)
    log_file_path = os.path.normpath(os.path.join(temp_user_dir, "system_logs"))

    create_directory(log_file_path)
    make_file_writable(log_file_path)

    # Context manager guarantees the handle is closed even if write() fails.
    with open(os.path.join(log_file_path, 'system_logs'), 'w') as log_file:
        log_file.write(contents)

    return send_file(os.path.join(log_file_path, 'system_logs'), as_attachment=True)
示例#4
0
def create_mapping_index_page(mapping_file_pathname, template):
    """Create an index html page for a mapping. e.g. http://airm.aero/developers/fixm-4.2.0-to-airm-1.0.0.html

  Keyword arguments:
    mapping_file_pathname -- string defining the location and name of the mapping e.g. data/xlsx/mapping FIXM 4.2.0.xlsx
    template -- string defining the location and name of the html template e.g. data/html/templates/concept-list-template.html
  """
    # Alias the import so the Mapping instance does not shadow the module.
    import mapping as mapping_module
    the_mapping = mapping_module.Mapping(mapping_file_pathname)
    mapping_metadata = the_mapping.metadata
    mapping_dict = the_mapping.dictionary

    import utils
    utils.create_directory("docs/airm/developers/" +
                           mapping_metadata["url_name"])
    soup = utils.create_html_soup(template)

    soup.title.string = mapping_metadata[
        "name"] + " | Semantic Correspondences | AIRM.aero"
    soup.find(text="MAPPING_NAME_BC").replace_with(mapping_metadata["name"])
    soup.find(text="MAPPING_NAME_H2").replace_with(mapping_metadata["name"])

    for record in mapping_dict:
        new_table_row = create_index_table_row(record, mapping_metadata)
        soup.find('tbody').insert(1, new_table_row)

    # 'with' closes the file even if prettify()/write() raises.
    with open("docs/airm/developers/" + mapping_metadata["url_name"] + ".html",
              "w") as f:
        f.write(soup.prettify())
示例#5
0
文件: main.py 项目: mchuck/tiny-ssg
def build_website(path, dist_path, theme_path, theme, collections, statics):
    """Render the site model with the given theme into dist_path, then copy
    configured static folders and the theme's non-HTML assets."""
    website = create_website_model(path, collections)

    log_default('Loading "%s" theme...', theme)
    templates = load_templates(theme_path)

    log_default('Rendering templates...')
    render_templates(dist_path, templates, website)

    # add static files
    if ARGS.static:
        log_default('Adding static files...')
        for static_dir in ARGS.static:
            log_default('\tAdding "%s" folder', static_dir)
            src_root = os.path.join(path, static_dir)
            dst_root = os.path.join(dist_path, static_dir)

            for src_file in get_all_files(src_root):
                log_default('\t\tAdding "%s"...', src_file)
                target = src_file.replace(src_root, dst_root)
                create_directory(os.path.dirname(target))
                shutil.copy2(src_file, target)

    # copy all non-html files from theme folder
    for asset in get_all_files(theme_path, '.html', negative=True):
        target = asset.replace(theme_path, dist_path)
        create_directory(os.path.dirname(target))
        shutil.copy2(asset, target)

    log_default('Done :thumbs_up:', not_verbose=True)
def get_labels(edges_file, output_dir="", read_cols=None, cols_to_rename=None):
    '''
    Load traversed-graph-path data and reduce it to a two-column dataframe
    ("node", "label"), where "node" holds sorted, distinct numeric node ids.
    The result is saved to '<output_dir>nodes_labels.csv'.

    edges_file: filepath to a csv file where nodes and hyperrelations are columns and traversed graph paths are rows
    output_dir: directory where the output dataframe should be saved
    read_cols: list of columns names to be transformed from provided files (None means all columns)
    cols_to_rename: dictionary used to rename columns (which are used as labels); useful if several columns contains the same type of nodes e.g. 'g1' and 'g2' both have label 'gene' but appear separately because of specific graph traversion path
    '''
    utils.create_directory(output_dir)
    # read_cols can be passed straight through: pandas treats None as
    # "load every column", so no intermediate != None check is needed.
    raw_df = pd.read_csv(edges_file, usecols=read_cols)
    raw_df = rename_cols(raw_df, cols_to_rename)
    labels = raw_df.columns

    enc_dict = encode_columns(labels, output_dir)
    raw_df = raw_df.rename(columns=enc_dict)

    # Stack to long format: one row per (label, node) pair.
    raw_df = pd.DataFrame(raw_df.stack()).droplevel(0)
    raw_df.reset_index(inplace=True)
    raw_df.columns = ["label", "node"]
    raw_df = raw_df[["node", "label"]]
    # Node ids look like "V123" — strip the prefix and sort numerically.
    raw_df["node"] = raw_df["node"].str.replace("V", "")
    raw_df["node"] = pd.to_numeric(raw_df["node"])
    raw_df = raw_df.sort_values(by=["node"])
    raw_df = raw_df.drop_duplicates()
    raw_df.to_csv("{}nodes_labels.csv".format(output_dir), index=False)
示例#7
0
async def download_file(_url, folder, file_id=None) -> None:
    # Download one telegra.ph file to <folder>/<file_id>_<_url>, skipping
    # files that already exist with non-zero size. When --compress is set,
    # images are converted to .webp in a process pool instead.
    # NOTE(review): getsize(...) appears to return a dict with 'raw' and
    # 'formatted' keys — confirm against its definition.
    create_directory(folder)
    if not Path(f"{folder}/{file_id}_{_url}").exists() or getsize(Path(f"{folder}/{file_id}_{_url}"))['raw'] == 0:
        async with semaphore, aiohttp.ClientSession(json_serialize=ujson.dumps,
                                         headers={'Connection': 'keep-alive'}) as session:
            async with session.get(f"https://telegra.ph/file/{_url}") as response:
                assert response.status == 200

                path = Path(f"{folder}/{file_id}_{_url}")
                if parser.parse_args().compress:
                    loop = asyncio.get_running_loop()
                    executor = ProcessPoolExecutor()
                    destination = append_extension(str(path), "webp")

                    # Only (re)compress when the .webp target is missing or empty.
                    # NOTE(review): if the target already exists with content,
                    # nothing is written at all in the compress branch.
                    if not Path(destination).exists() or getsize(destination)['formatted'] == 0:
                        if is_image_by_url(_url):
                            # Compress in a worker process; the inner
                            # "if ... else None" re-checks a condition that is
                            # already known True here (redundant but harmless).
                            await asyncio.gather(*[loop.run_in_executor(
                                executor,
                                compress_image,
                                await response.read(),
                                Path(destination)) if is_image_by_url(_url) else None])
                        else:
                            # Non-image payloads are stored uncompressed.
                            await write_file(await response.read(), path)
                            log(f"[download] {file_id}_{_url} — {getsize(path)['formatted']}")

                else:
                    # No compression requested: store the raw payload.
                    await write_file(await response.read(), path)
                    log(f"[download] {file_id}_{_url} — {getsize(path)['formatted']}")
示例#8
0
文件: log.py 项目: smjurcak/csm
def download_system_logs():
    """Dump all Log rows (newest first) into a per-user temp file and
    return it to the client as an attachment."""
    db_session = DBSession()
    logs = db_session.query(Log) \
        .order_by(Log.created_time.desc())

    # Build the report from a list and join once (avoids quadratic +=).
    parts = []
    for log in logs:
        parts.append(get_datetime_string(log.created_time) + ' UTC\n')
        parts.append(log.level + ':' + log.msg + '\n')
        if log.trace is not None:
            parts.append(log.trace + '\n')
        parts.append('-' * 70 + '\n')
    contents = ''.join(parts)

    temp_user_dir = create_temp_user_directory(current_user.username)
    log_file_path = os.path.normpath(os.path.join(temp_user_dir,
                                                  "system_logs"))

    create_directory(log_file_path)
    make_file_writable(log_file_path)

    # 'with' guarantees the file is closed even when write() raises.
    with open(os.path.join(log_file_path, 'system_logs'), 'w') as log_file:
        log_file.write(contents)

    return send_file(os.path.join(log_file_path, 'system_logs'),
                     as_attachment=True)
示例#9
0
 def __init__(self, options):
     """Initialize server state from parsed command-line options.

     options -- an options namespace; only the attributes read below are
     required.
     """
     self.ports = options.ports
     self.password = options.password
     self.motdfile = options.motd
     self.verbose = options.verbose
     self.debug = options.debug
     # LDAP settings default to "disabled"; they are only populated when
     # the LDAP library could be imported (ldap_available flag).
     self.ldap_server = None
     self.ldap_port = None
     self.ldap_dn = ''
     self.ldap_encryption = None
     self.ldap_require_cert = None
     self.ldap_debug = False
     if ldap_available:
         self.ldap_server = options.ldap_server
         self.ldap_port = options.ldap_port
         self.ldap_dn = options.ldap_dn
         #self.ldap_password = options.ldap_password
         self.ldap_encryption = options.ldap_encryption
         self.ldap_require_cert = options.ldap_require_cert
         self.ldap_debug = options.ldap_debug
     self.logdir = options.logdir
     self.ssl_pem_file = options.ssl_pem_file
     self.statedir = options.statedir
     self.name = socket.getfqdn()[:63]  # Server name limit from the RFC.
     self.channels = {}  # irc_lower(Channel name) --> Channel instance.
     self.clients = {}  # Socket --> Client instance.
     self.nicknames = {}  # irc_lower(Nickname) --> Client instance.
     # Only create log/state directories when they were configured.
     if self.logdir:
         create_directory(self.logdir)
     if self.statedir:
         create_directory(self.statedir)
def download(model_list_path, output_dir, force):
    """Read the model-list document, validate it, and download/convert
    every listed model into <output_dir>/models.

    Exits the process with status 1 when the document cannot be loaded.
    """
    document = load_document(model_list_path)
    if document is None:
        sys.exit(1)
    validate_schema(document)

    models_dir = os.path.join(output_dir, "models")
    create_directory(models_dir, False)

    for model in document:
        download_and_convert_model(models_dir, model, force)
示例#11
0
def save_model_metadata(model, current_date_time):
    """Collect project, model-file, and training metadata (mostly from
    environment variables) and write it to the run's metadata JSON file."""
    models_root = 'models/{}/models_{}'.format(object_type, current_date_time)
    create_directory(models_root + '/metadata', format=False)

    metadata = {
        'project': {
            'type': os.getenv('PROJECT_TYPE').lower(),
            'object_type': object_type,
            'date_time': current_date_time,
        },
        'models': {},
    }

    # Index every saved model file by the epoch encoded in its filename,
    # e.g. "name.weights_07-....h5" -> key "7".
    for model_name in get_subfiles(models_root):
        epoch = int(model_name.split('.')[1].split('_')[1].split('-')[0])
        metadata['models'][str(epoch)] = model_name

    # save model config in json format
    model_config_path = save_model_config(model, current_date_time)

    metadata['config'] = {
        'model': model_config_path,
        'data_preparation': {
            'source_data': os.getenv('SATELLITE_IMAGE_PATH'),
            'tilesize': int(os.getenv('TILESIZE')),
            'step': int(os.getenv('STEP')),
            'width': int(os.getenv('WIDTH')),
            'height': int(os.getenv('HEIGHT')),
            'percent_image_with_no_annotations':
                float(os.getenv('PERCENT_IMAGE_WITH_NO_ANNOTATIONS')),
            'min_annotations_per_image':
                int(os.getenv('MIN_ANNOTATIONS_PER_IMAGE')),
        },
        'data_preprocessing': {
            'percent_valid': float(os.getenv('PERCENTVALID')),
            'percent_test': float(os.getenv('PERCENTTEST')),
        },
        'train': {
            'epochs': int(os.getenv('EPOCHS')),
            'train_datasetsize': int(os.getenv('TRAIN_DATASETSIZE')),
            'valid_datasetsize': int(os.getenv('VALID_DATASETSIZE')),
            'batchsize': int(os.getenv('BATCHSIZE')),
        },
    }

    with open('{}/metadata/metadata_{}.json'.format(
            models_root, current_date_time), 'w') as f:
        json.dump(metadata, f)
示例#12
0
def convert_config(db_session, http_request, template, schedule_form):
    """Validate the uploaded config file, save it into the conversion
    directory, and render the conversion page (or an error page)."""
    config_form = init_config_form(db_session, http_request, get=True)

    # All exits render the same template; only the filename and error vary.
    def render(input_filename, error_message):
        return render_template(template,
                               config_form=config_form,
                               input_filename=input_filename,
                               err_msg=error_message,
                               schedule_form=schedule_form,
                               install_action=get_install_migrations_dict(),
                               server_time=datetime.datetime.utcnow())

    success, err_msg = download_latest_config_migration_tool()
    if not success:
        return render("", err_msg)

    # check if the post request has the file part
    if 'file' not in http_request.files:
        flash('No file in request.')
        return render("", "Internal error - No file.")

    input_file = http_request.files['file']
    # if user does not select file, browser also
    # submit a empty part without filename
    if input_file.filename == '':
        flash('No selected file.')
        return render("", "Internal error - No selected file.")

    config_conversion_path = get_config_conversion_path()
    create_directory(config_conversion_path)

    if input_file:
        input_file.save(os.path.join(config_conversion_path,
                                     secure_filename(input_file.filename)))

    return render(input_file.filename, "")
示例#13
0
def download_loop(clear, verbose):
    """Download one PDF per day, from INICIO (2020-04-14) through today.

    clear -- forwarded to create_directory (clears the "pdf" folder)
    verbose -- print each day's download status message
    """
    create_directory("pdf", clear)

    INICIO = dt.date(2020, 4, 14)
    # dt.date.today(), not INICIO.today(): calling the classmethod through
    # an instance works but misleadingly suggests it depends on INICIO.
    numero_dias = (dt.date.today() - INICIO).days
    for n in range(numero_dias + 1):
        data = INICIO + dt.timedelta(n)
        msg = download_if_inexistent(data)
        if verbose:
            print(msg)
示例#14
0
def make_database_views(input_dir, output_dir):
    """STEP 3: build the LEA and schools database-view scripts under
    <output_dir>scripts/."""
    print("--- STEP 3: CREATE DATABASE VIEWS")

    # NOTE(review): output_dir is assumed to end with a path separator.
    scripts_output_dir = output_dir + "scripts/"
    create_directory(scripts_output_dir)

    for make_views in (make_lea_views, make_schools_views):
        make_views(input_dir, scripts_output_dir)

    print(f"    * Database Views Complete ")
示例#15
0
def init():
    """Create the supporting directory tree and warn when LDAP
    authentication is unavailable."""
    for directory in (get_log_directory(),
                      get_repository_directory(),
                      get_temp_directory(),
                      get_migration_directory(),
                      get_doc_central_directory()):
        create_directory(directory)

    if not is_ldap_supported():
        print('LDAP authentication is not supported because it has not been installed.')
示例#16
0
def init():
    """Create every supporting directory the application needs, then warn
    if LDAP authentication cannot be used."""
    directory_getters = [get_log_directory, get_repository_directory,
                         get_temp_directory, get_migration_directory,
                         get_doc_central_directory]
    for getter in directory_getters:
        create_directory(getter())

    if not is_ldap_supported():
        print(
            'LDAP authentication is not supported because it has not been installed.'
        )
示例#17
0
def main(_file='', _data=None) -> int:
    """Generate answer sheets from a data file or an in-memory data dict.

    _file -- optional path to a data file; when valid it overrides _data
    _data -- dict with 'course', 'evaluation', 'ocr', 'template' and
             optionally 'copies' keys
    Returns 1 on success, 0 when the input file is invalid.
    """
    # A mutable default argument ({}) is shared across calls; use None
    # as the sentinel and build a fresh dict per call instead.
    if _data is None:
        _data = {}

    if (_file):
        _data = read_data(_file) if check_file(_file) else {}

        if not _data:
            print_invalid_file(_file)
            return 0

    COURSE_NAME = _data['course']
    EVALUATION = _data['evaluation']
    OCR = _data['ocr']
    COPIES = _data['copies'] if _data.get('copies') else 1

    # Plain if-statements instead of "expr if cond else None" abuse.
    if not os.path.exists(f'{os.getcwd()}/ANSWER_SHEETS/'):
        create_directory(f'{os.getcwd()}/ANSWER_SHEETS/')

    ANSWER_SHEETS_DIR_PATH = f'{os.getcwd()}/ANSWER_SHEETS/{COURSE_NAME}_{EVALUATION}'

    cool_print(
        f'\nAnswer sheets will be available in: {ANSWER_SHEETS_DIR_PATH}',
        style='result')

    _current_time = asctime(localtime(time()))
    cool_print(f'\n\nInitializing program...[{_current_time}]', style='info')

    if png_template_exists(f'{TEMPLATE_DIRECTORY}/{_data["template"]}'):
        cool_print_decoration(
            'ERROR: Found template in .pdf format.\nConverting template to .png format...',
            'danger')
        pdf_to_png(f'{TEMPLATE_DIRECTORY}/{_data["template"]}.pdf')

    if not os.path.exists(ANSWER_SHEETS_DIR_PATH):
        create_directory(ANSWER_SHEETS_DIR_PATH)

    _data['template'] = f'{TEMPLATE_DIRECTORY}/{_data["template"]}.png'
    cool_print(f'\nPreparing data...', style='info')
    _files = make_files(_data, _save_file=False)

    cool_print(f'\nAdding texts to templates...', style='info')
    _answer_sheets = composite_multiple_images(
        _files, _save_pages=False, _save_path=ANSWER_SHEETS_DIR_PATH, ocr=OCR)

    _answer_sheets = _answer_sheets if COPIES == 1 else make_copies(
        _answer_sheets, COPIES)

    cool_print(f'\nMerging files...[could take a while]', style='info')
    pdf_merger(_answer_sheets, f'{ANSWER_SHEETS_DIR_PATH}/compilation.pdf')

    cool_print(f'\nDone!', style='info')

    return 1
示例#18
0
def convert_loop(clear, verbose):
    """Convert every downloaded PDF in ./pdf to text under ./txt,
    skipping files already converted.

    clear -- forwarded to create_directory (clears the "txt" folder)
    verbose -- print per-file progress messages
    """
    import subprocess

    create_directory("txt", clear)
    lista_pdfs = sorted(glob("./pdf/*.pdf"), key=getmtime)
    for arquivo in lista_pdfs:
        # Build the target path from the basename. The previous
        # arquivo.replace("pdf", "txt", 2) corrupted any filename that
        # itself contained "pdf" (e.g. ./pdf/mypdf.pdf -> ./txt/mytxt.pdf).
        base = os.path.splitext(os.path.basename(arquivo))[0]
        txt_path = os.path.join(".", "txt", base + ".txt")
        if exists(txt_path):
            if verbose:
                print(f"{txt_path} já existe")
        else:
            if verbose:
                print(f"Convertendo {arquivo}")
            # Argument list instead of os.system's shell string: safe for
            # filenames containing spaces or shell metacharacters.
            subprocess.run(["pdftotext", arquivo, txt_path])
示例#19
0
    def load_skin(self, skin_name, system_skin_dir=None, user_skin_dir=None):
        '''Load skin, return True if load finish, otherwise return False.

        skin_name -- name of the skin to load
        system_skin_dir -- optional system-wide skin directory; created
            when given
        user_skin_dir -- optional per-user skin directory; created when
            given
        '''
        try:
            # Save skin dir.
            self.skin_name = skin_name

            if system_skin_dir:
                self.system_skin_dir = system_skin_dir
                create_directory(self.system_skin_dir)

            if user_skin_dir:
                self.user_skin_dir = user_skin_dir
                create_directory(self.user_skin_dir)

            self.skin_dir = self.get_skin_dir()

            # Load config file.
            self.config = Config(self.get_skin_file_path("config.ini"))
            self.config.load()

            # Get theme config.
            self.theme_name = self.config.get("theme", "theme_name")

            # Get application config.
            self.app_id = self.config.get("application", "app_id")
            self.app_version = self.config.getfloat("application", "app_version")

            # Get background config.
            self.image = self.config.get("background", "image")
            self.x = self.config.getfloat("background", "x")
            self.y = self.config.getfloat("background", "y")
            self.scale_x = self.config.getfloat("background", "scale_x")
            self.scale_y = self.config.getfloat("background", "scale_y")
            self.dominant_color = self.config.get("background", "dominant_color")

            # Get action config.
            self.deletable = self.config.getboolean("action", "deletable")
            self.editable = self.config.getboolean("action", "editable")
            self.vertical_mirror = self.config.getboolean("action", "vertical_mirror")
            self.horizontal_mirror = self.config.getboolean("action", "horizontal_mirror")

            # Generate background pixbuf.
            self.background_pixbuf = gtk.gdk.pixbuf_new_from_file(self.get_skin_file_path(self.image))

            # Save skin name.
            self.save_skin_name()

            return True
        # Python 2 except syntax: any failure during load is reported and
        # the method returns False instead of raising.
        except Exception, e:
            print "load_skin error: %s" % (e)
            return False
示例#20
0
文件: theme.py 项目: netphi/deepin-ui
 def __init__(self, system_theme_dir, user_theme_dir):
     '''Init theme.

     system_theme_dir -- directory holding the built-in theme files
     user_theme_dir -- directory holding per-user theme files
     '''
     # Init.
     self.system_theme_dir = system_theme_dir
     self.user_theme_dir = user_theme_dir
     self.theme_info_file = "theme.txt"
     self.ticker = 0
     # Caches for loaded theme resources, filled lazily elsewhere.
     self.pixbuf_dict = {}
     self.color_dict = {}
     self.alpha_color_dict = {}
     self.shadow_color_dict = {}

     # Create directory if necessarily.
     for theme_dir in [self.system_theme_dir, self.user_theme_dir]:
         create_directory(theme_dir)
示例#21
0
    def __init__(self, **kwargs):
        """Pull the SMU/SP report arguments out of kwargs, sort both lists
        newest-first, and prepare the per-user export directory."""
        ReportWriter.__init__(self, **kwargs)
        self.user = kwargs.pop('user')
        self.smu_loader = kwargs.pop('smu_loader')
        self.smu_list = kwargs.pop('smu_list')
        self.sp_list = kwargs.pop('sp_list')

        # Both lists are ordered by posted date, most recent first.
        def newest_first(items):
            return sorted(items, key=lambda x: x.posted_date, reverse=True)

        self.smu_list = newest_first(self.smu_list)
        self.sp_list = newest_first(self.sp_list)

        temp_user_dir = create_temp_user_directory(self.user.username)
        self.output_file_directory = os.path.normpath(
            os.path.join(temp_user_dir, 'software_information_export'))

        create_directory(self.output_file_directory)
        make_file_writable(self.output_file_directory)
示例#22
0
    def __init__(self, **kwargs):
        """Pull the inventory summary arguments out of kwargs and prepare
        the per-user export directory."""
        ReportWriter.__init__(self, **kwargs)
        for attr in ('user', 'region_name',
                     'chassis_summary_iter', 'model_name_summary_iter',
                     'inventory_without_serial_number_iter',
                     'inventory_with_duplicate_serial_number_iter'):
            setattr(self, attr, kwargs.pop(attr))

        temp_user_dir = create_temp_user_directory(self.user.username)
        self.output_file_directory = os.path.normpath(
            os.path.join(temp_user_dir, 'inventory_information_export'))

        create_directory(self.output_file_directory)
        make_file_writable(self.output_file_directory)
示例#23
0
    def deconstruct(self):
        """Deconstruct an entire combined image file into the individual
        blocks.

        Validates that the source image exists, that its size is an exact
        multiple of the grid, and that blocks are square; then writes each
        mapped (i, j) block out to its own file.
        """
        # Do some sanity checks on input.
        if not os.path.exists(self.img_path):
            self.error_handler("Input file does not exist: %s" % (self.img_path))
        self.source = Image.open(self.img_path)
        (source_width, source_height) = self.source.size

        if (source_width % self.grid_width != 0) or (source_height % self.grid_height != 0):
            self.error_handler("Invalid image size.")

        # NOTE(review): "/" is integer division here under Python 2 (the
        # xrange below confirms py2); on py3 this would produce a float.
        self.block_size = source_width / self.grid_width
        if self.block_size != (source_height / self.grid_height):
            self.error_handler("Blocks not square.")

        # create_directory appears to return True on success and an error
        # value otherwise — confirm against utils.create_directory.
        res = utils.create_directory(self.workdir)
        if res != True:
            self.error_handler(res)

        # Loop over everything, ignoring blank spaces.
        for i in xrange(self.grid_width):
            for j in xrange(self.grid_height):
                if (i, j) in self.map:
                    block = self.map[(i, j)]
                    self.write_block_to_file(i, j, block)
示例#24
0
def transform_data(edges_file,
                   edges_tuples,
                   output_dir="",
                   several_out_files=False,
                   undirected=False):
    '''
    Loads the data from a provided file and transforms the data to a two column dataframe with source and target nodes (edge_index/hyperedge_index). Finally saves data to csv file/files.
    Args:
    edges_file: filepath to a csv file where nodes and hyperrelations are columns and traversed graph paths are rows
    edge_tuples: list of tuples: (node, containing hyperrelation) or (hyperrelation, containing hyperrelation)
    output_dir: directory where the output dataframe should be saved
    several_out_files: if True saves transformed dataframes separately for each tuple from 'edge_tuples' (default: False)
    undirected: if True creates an edge_index file/files for undirected graph
    '''
    utils.create_directory(output_dir)
    edges_df = pd.read_csv(edges_file)
    # Node ids look like "V123" — strip the prefix in every column.
    for column in edges_df.columns:
        edges_df[column] = edges_df[column].str.replace("V", "")

    temp_cols = ["source", "target"]
    if several_out_files:  # truthiness instead of "== True"
        for edge_tuple in edges_tuples:
            temp_df = pd.DataFrame(columns=temp_cols)
            file_out_name = "{}{}_{}_edges.csv".format(output_dir,
                                                       edge_tuple[0],
                                                       edge_tuple[1])
            temp_df[temp_cols] = edges_df[edge_tuple]
            if undirected:
                temp_df = make_undirected(temp_df)
            temp_df.to_csv(file_out_name, index=False)
    else:
        # Collect all renamed frames and concatenate once instead of
        # calling pd.concat inside the loop (quadratic copying).
        frames = [pd.DataFrame(columns=temp_cols)]
        for edge_tuple in edges_tuples:
            frames.append(edges_df[edge_tuple].rename(columns={
                edge_tuple[0]: temp_cols[0],
                edge_tuple[1]: temp_cols[1]
            }))
        temp_df = pd.concat(frames, ignore_index=True)
        temp_df = temp_df.drop_duplicates()
        if undirected:
            temp_df = make_undirected(temp_df)
        temp_df.to_csv(output_dir + "all_edges_index.csv", index=False)
示例#25
0
def setup():  # returns None
    """Create the directory tree and default data files the application
    needs, skipping anything that already exists."""
    # create directory if needed ("directory" avoids shadowing builtin dir)
    for directory in const.BASIC_DIR_TREE:
        if not utils.check_if_directory_exists(directory):
            utils.create_directory(directory)
    # create files if needed (truthiness instead of "== False")
    if not utils.check_if_file_exists(const.CONFIG_FILE):
        # NOTE: the config file is created by do_file_from_list itself.
        do_file_from_list(const.CONFIG_FILE, const.DEFAULT_CONFIG_FILE)
    if not utils.check_if_file_exists(const.KEYS_DOWN_FILE):
        utils.create_file(const.KEYS_DOWN_FILE)
        do_file_from_list(const.KEYS_DOWN_FILE, const.DEFAULT_KEYS_DOWN_FILE)
    if not utils.check_if_file_exists(const.KEYS_UP_FILE):
        utils.create_file(const.KEYS_UP_FILE)
        do_file_from_list(const.KEYS_UP_FILE, const.DEFAULT_KEYS_UP_FILE)
    if not utils.check_if_file_exists(const.MODS_FILE):
        utils.create_file(const.MODS_FILE)
        do_file_from_list(const.MODS_FILE, const.DEFAULT_MODS_FILE)
def download_from_object_storage(config, directory, bucket_name,
                                 object_name_list):
    """Download the named objects from an OCI bucket into *directory*,
    creating the implied local sub-folders first."""
    object_storage = oci.object_storage.ObjectStorageClient(config)
    namespace = object_storage.get_namespace().data

    # Pre-create the distinct local folders implied by the object names.
    for object_folder in {name.rsplit("/", 1)[0] for name in object_name_list}:
        utils.create_directory(os.path.join(directory, object_folder))

    for object_name in object_name_list:
        print('Downloading object: {}'.format(object_name))
        get_obj = object_storage.get_object(namespace, bucket_name,
                                            object_name)
        # Stream in 1 MiB chunks to keep memory bounded.
        with open(os.path.join(directory, object_name), 'wb') as f:
            for chunk in get_obj.data.raw.stream(1024 * 1024,
                                                 decode_content=False):
                f.write(chunk)
示例#27
0
    def __init__(self, **kwargs):
        """Pull the inventory search parameters and result iterators out
        of kwargs and prepare the per-user export directory."""
        ReportWriter.__init__(self, **kwargs)
        for attr in ('user', 'serial_number', 'region_names',
                     'chassis_types', 'software_versions', 'model_names',
                     'partial_model_names', 'vid',
                     'available_inventory_iter', 'in_use_inventory_iter'):
            setattr(self, attr, kwargs.pop(attr))

        temp_user_dir = create_temp_user_directory(self.user.username)
        self.output_file_directory = os.path.normpath(
            os.path.join(temp_user_dir, 'inventory_information_export'))

        create_directory(self.output_file_directory)
        make_file_writable(self.output_file_directory)
示例#28
0
def export_command_profiles():
    """Export the requested custom command profiles as a JSON attachment."""
    db_session = DBSession()
    # The requested names arrive as one comma-separated query argument.
    requested = request.args.getlist('profiles_list[]')[0].split(",")

    export_data = {"CSM Server:Custom Command Profile": {}}
    for profile in db_session.query(CustomCommandProfile).all():
        if profile.profile_name in requested:
            export_data["CSM Server:Custom Command Profile"][profile.profile_name] = profile.command_list

    temp_user_dir = create_temp_user_directory(current_user.username)
    export_dir = os.path.normpath(os.path.join(temp_user_dir, "custom_command_export"))
    create_directory(export_dir)
    make_file_writable(export_dir)

    export_path = os.path.join(export_dir, 'custom_command_profiles.json')
    with open(export_path, 'w') as command_export_file:
        command_export_file.write(json.dumps(export_data, indent=2))

    return send_file(export_path, as_attachment=True)
示例#29
0
文件: common.py 项目: smjurcak/csm
def download_session_logs(file_list):
    """Bundle the given session log files into a zip archive and send it.

    Parameters
    ----------
    file_list : list of str
        Paths of the session log files to include in the archive.
    """
    # Web sessions authenticate via current_user; API requests via g.api_user.
    if hasattr(current_user, 'username'):
        username = current_user.username
    else:
        username = g.api_user.username

    temp_user_dir = create_temp_user_directory(username)
    session_zip_path = os.path.normpath(os.path.join(temp_user_dir, "session_logs"))
    zip_file = os.path.join(session_zip_path, "session_logs.zip")
    create_directory(session_zip_path)
    make_file_writable(session_zip_path)

    # Context manager guarantees the archive is closed (and flushed) even
    # if adding a file raises — the original leaked the handle on error.
    with zipfile.ZipFile(zip_file, mode='w') as zout:
        for f in file_list:
            # Store each file flat (basename only) inside the archive.
            zout.write(os.path.normpath(f), os.path.basename(f))

    return send_file(zip_file, as_attachment=True)
示例#30
0
    def write(self, dpath, use_text=True, use_tags=True, overwrite=False):
        r"""
        Write the patient XML tree to a file.

        Only the directory path needs to be specified, since the
        filename is already defined. The <TEXT> and <TAGS> fields can be
        omitted.

        Parameters
        ----------
        dpath : str
            The output directory path. If necessary, directories are
            created. The filename is the same (as defined).
        use_text : bool, optional
            If `True` the generated XML contains the <TEXT> field,
            otherwise the <TEXT> field is omitted. Default: `True`.
        use_tags : bool, optional
            If `True` the generated XML contains the <TAGS> field,
            otherwise the <TAGS> field is omitted. Default: `True`.
        overwrite : bool, optional
            If `True` overwriting is allowed, otherwise it is not.
            Default: `False`.

        Raises
        ------
        AssertionError
            If the output directory equals the input directory, if the
            target path exists but is not a regular file, or if the
            target file exists and `overwrite` is `False`.

        """
        dpath = os.path.abspath(dpath)
        # Create the directory first: os.path.samefile requires both of
        # its arguments to exist.
        create_directory(dpath)
        e = 'Writing to the same directory is not allowed!'
        assert not os.path.samefile(self._dpath, dpath), e
        fpath = os.path.join(dpath, self._fname)
        if os.path.exists(fpath):
            e = 'Filepath already exists and it is not a file!'
            assert os.path.isfile(fpath), e
            # Typo fixed in the message below ("Overwitten" -> "Overwriting").
            assert overwrite, 'Overwriting is not allowed!'
        with open(fpath, mode='w', encoding='utf-8') as f:
            _ = f.write(self.to_xml(use_text=use_text, use_tags=use_tags))
示例#31
0
def convert_config(db_session, http_request, template, schedule_form):
    """Accept an uploaded configuration file for the migration converter."""
    config_form = init_config_form(db_session, http_request, get=True)

    def render(input_filename, err_msg):
        # Single rendering helper shared by the error and success paths.
        return render_template(template, config_form=config_form,
                               input_filename=input_filename, err_msg=err_msg,
                               schedule_form=schedule_form,
                               install_action=get_install_migrations_dict(),
                               server_time=datetime.datetime.utcnow())

    success, err_msg = download_latest_config_migration_tool()
    if not success:
        return render("", err_msg)

    # The POST request must carry a file part.
    if 'file' not in http_request.files:
        flash('No file in request.')
        return render("", "Internal error - No file.")

    input_file = http_request.files['file']
    # Browsers submit an empty part (no filename) when no file was chosen.
    if input_file.filename == '':
        flash('No selected file.')
        return render("", "Internal error - No selected file.")

    config_conversion_path = get_config_conversion_path()
    create_directory(config_conversion_path)

    if input_file:
        safe_name = secure_filename(input_file.filename)
        input_file.save(os.path.join(config_conversion_path, safe_name))

    return render(input_file.filename, "")
示例#32
0
    def write(self, dpath, use_text=True, use_tags=True, overwrite=False):
        r"""
        Write every patient XML file into a specific directory.

        Only the directory path needs to be specified, since the patient
        filenames are already defined. The <TEXT> and <TAGS> fields can
        be omitted.

        Parameters
        ----------
        dpath : str
            The output directory path. If necessary, directories are
            created.
        use_text : bool, optional
            If `True` the generated XML contains the <TEXT> field,
            otherwise the <TEXT> field is omitted. Default: `True`.
        use_tags : bool, optional
            If `True` the generated XML contains the <TAGS> field,
            otherwise the <TAGS> field is omitted. Default: `True`.
        overwrite : bool, optional
            If `True` overwriting is allowed, otherwise it is not.
            Default: `False`.

        Raises
        ------
        AssertionError
            The output directory path cannot be the same as the input
            directory path.

        """
        target = os.path.abspath(dpath)
        # Create first: os.path.samefile needs both paths to exist.
        create_directory(target)
        assert not os.path.samefile(self._dpath, target), \
            'Writing to the same directory is not allowed!'
        # Delegate the per-patient serialization to each patient object.
        for patient in self._patients:
            patient.write(dpath=target, use_text=use_text,
                          use_tags=use_tags, overwrite=overwrite)
示例#33
0
def parse_pdf2img(filename, folder_img):
    """Convert the first pages of a PDF document into JPEG images.

    Parameters
    ----------
    filename : str
        Path of the PDF document to convert.
    folder_img : str
        Destination directory for the generated images (created if needed).

    Returns
    -------
    bool
        True on success, False if the conversion failed.
    """
    try:
        # Render into a temporary directory first so partially-converted
        # output never lands in folder_img.
        with tempfile.TemporaryDirectory() as tmppath:
            convert_from_path(filename,
                              dpi=80,
                              fmt='jpeg',
                              strict=False,
                              last_page=10,  # cap the work for huge PDFs
                              output_folder=tmppath)

            utils.create_directory(folder_img)
            for page_file in os.listdir(tmppath):
                shutil.move(os.path.join(tmppath, page_file), folder_img)
        return True
    except Exception:
        # `except Exception` (not a bare except) so KeyboardInterrupt and
        # SystemExit still propagate.  Also fixes the doubled space that
        # the old two-part string concatenation produced in the message.
        logger.error("pdf2image could not convert "
                     "the document '{}'".format(filename))
        return False
示例#34
0
def get_predictions():
    """Predict segmentation masks for every test image and persist them."""
    create_directory(os.getenv('PREDICTION_IMAGES_PATH'))

    test_dir = os.getenv('TEST_IMAGES_PATH')
    filenames = get_subfiles(test_dir)   # every file in the test directory
    generator = test_generator(test_dir)  # yields test batches
    batch_size = len(filenames)

    # Run inference over the whole test set in one predict call.
    model = get_model()
    print('\nStarting testing ...')
    print('Using model - {}'.format(weight_file_path))
    predictions = model.predict_generator(generator, batch_size, verbose=1)
    print('DONE !')

    save_predictions(predictions, filenames)
示例#35
0
def download_session_logs(file_list):
    """Zip the given session log files and return them as a download.

    Parameters
    ----------
    file_list : list of str
        Paths of the session log files to include in the archive.
    """
    # Web sessions authenticate via current_user; API requests via g.api_user.
    if hasattr(current_user, 'username'):
        username = current_user.username
    else:
        username = g.api_user.username

    temp_user_dir = create_temp_user_directory(username)
    session_zip_path = os.path.normpath(
        os.path.join(temp_user_dir, "session_logs"))
    zip_file = os.path.join(session_zip_path, "session_logs.zip")
    create_directory(session_zip_path)
    make_file_writable(session_zip_path)

    # Context manager closes the archive even when a write raises; the
    # original left the ZipFile open on error.
    with zipfile.ZipFile(zip_file, mode='w') as zout:
        for f in file_list:
            # Archive entries are flattened to their basenames.
            zout.write(os.path.normpath(f), os.path.basename(f))

    return send_file(zip_file, as_attachment=True)
示例#36
0
def save_predictions(results, test_filenames):
    """Persist predicted masks, optionally combined with their source images."""
    # Write the raw prediction masks next to their filenames.
    print('\nSaving test results')
    save_result(os.getenv('PREDICTION_IMAGES_PATH'),
                results,
                test_filenames,
                flag_multi_class=False,
                num_class=2)
    print('DONE !')

    # Optionally also emit side-by-side image/mask composites.
    if os.getenv('SAVE_COMBINED') == 'TRUE':
        create_directory(os.getenv('COMBINED_IMAGES_PATH'))
        print('\nSaving test results - images and masks combined')
        save_images_and_masks(os.getenv('TEST_IMAGES_PATH'),
                              os.getenv('PREDICTION_IMAGES_PATH'),
                              '_predict',
                              save=True)
        print('DONE !')
示例#37
0
def encode_ids(edge_csv_file_path, nodes_labs_file_path, output_path):
    """Encode node / hyperrelation ids into a compact 0..N-1 range.

    N is the number of nodes in the labels file (LabelEncoder maps classes
    to 0..n_classes-1). The re-encoded frames are saved as CSV files in
    ``output_path``.

    Args:
        edge_csv_file_path: CSV whose columns are nodes/hyperrelations and
            whose rows are traversed graph paths.
        nodes_labs_file_path: two-column CSV with node ids and their labels.
        output_path: directory in which to save the encoded dataframes.
    """
    utils.create_directory(output_path)
    edge_index_df = pd.read_csv(edge_csv_file_path)
    nodes_labels = pd.read_csv(nodes_labs_file_path)

    # Fit on the full node vocabulary so every edge endpoint is known.
    lbl_encoder = LabelEncoder()
    lbl_encoder.fit(nodes_labels["node"])
    nodes_labels["node"] = lbl_encoder.transform(nodes_labels["node"])
    edge_index_df["source"] = lbl_encoder.transform(edge_index_df["source"])
    edge_index_df["target"] = lbl_encoder.transform(edge_index_df["target"])

    # os.path.join works whether or not output_path carries a trailing
    # separator; the old "{}file".format(output_path) concatenation glued
    # the filename onto the directory name when the slash was missing.
    edge_index_df.to_csv(os.path.join(output_path, "all_edges_index.csv"),
                         index=False)
    nodes_labels.to_csv(os.path.join(output_path, "nodes_labels.csv"),
                        index=False)
示例#38
0
def export_software_profiles():
    """Send the selected software profiles to the user as a JSON download."""
    db_session = DBSession()

    requested = request.args.getlist('software_profile_list[]')[0].split(",")

    payload = {"CSM Server:Software Profile": {}}
    for software_profile in get_software_profile_list(db_session):
        if software_profile.name in requested:
            payload["CSM Server:Software Profile"][software_profile.name] = \
                software_profile.packages

    # Stage the export file in the user's temp area before serving it.
    temp_user_dir = create_temp_user_directory(current_user.username)
    export_dir = os.path.normpath(
        os.path.join(temp_user_dir, "software_profile_export"))
    create_directory(export_dir)
    make_file_writable(export_dir)

    export_path = os.path.join(export_dir, 'software_profiles.json')
    with open(export_path, 'w') as command_export_file:
        command_export_file.write(json.dumps(payload, indent=2))

    return send_file(export_path, as_attachment=True)
示例#39
0
def setup_data():
    """
    See if needed files currently exist in input directory.
    If not, see if extract file already exists correctly
    If not, retrieve and extract accordingly
    Move extracted files to correct directory and simplified filename
    Remove extra directory and files
    """
    # NOTE: the description above was previously a stray string literal in
    # the middle of the body (a no-op); it is now a real docstring.
    pretty_print("SETUP DATA")
    extract_directory = f"{INPUT_DIR}extracts/"

    if needed_files_exists():
        pretty_print("Needed Files Already Exist", True)
    else:
        if extracted_files_exists(extract_directory):
            pretty_print("Extract Already Exist", True)
        else:
            create_directory(extract_directory)
            pretty_print("Fetching CRDC Data From Public Website (34MB)", True)
            zip_file_name = get_filename_from_url(CRDC_DATA_URL)
            zip_file_name = fetch_file(CRDC_DATA_URL, extract_directory,
                                       zip_file_name)

            pretty_print("Extracting Zip At ", True,
                         extract_directory + zip_file_name)
            unzip(extract_directory + zip_file_name, extract_directory)

        pretty_print("Moving Files In Place", True)
        # Pair each extracted CRDC file with its simplified destination name.
        formatted_files_list = [
            {"src_path": entry["extracted_path"],
             "dest_path": entry["needed_file_name"]}
            for entry in CRDC_FILES
        ]
        rename_files(formatted_files_list, extract_directory, INPUT_DIR)

        pretty_print("Cleaning Up", True)
        remove_directory(extract_directory)

    create_directory(MIGRATION_DIR, True)
示例#40
0
 def __init__(self, system_theme_dir, user_theme_dir):
     '''
     Initialize Theme class.

     @param system_theme_dir: Default theme directory.
     @param user_theme_dir: User's theme save directory, generic is ~/.config/project-name/theme
     '''
     self.system_theme_dir = system_theme_dir
     self.user_theme_dir = user_theme_dir
     self.theme_info_file = "theme.txt"
     self.ticker = 0

     # Caches for loaded pixbufs and the various color lookup tables.
     self.pixbuf_dict = {}
     self.color_dict = {}
     self.alpha_color_dict = {}
     self.shadow_color_dict = {}

     # Make sure both theme directories exist (system first, then user).
     create_directory(self.system_theme_dir)
     create_directory(self.user_theme_dir)
示例#41
0
def setup_data(input_dir="./input/"):
    """
    See if needed files currently exist in input directory.
    If not, see if extract file already exists correctly
    If not, retrieve and extract accordingly
    Move extracted files to correct directory and simplified filename
    Remove extra directory and files
    """
    # NOTE: the description above used to sit mid-body as a stray string
    # literal (a no-op statement); it is now a real docstring.
    print("--- STEP 1: SETUP DATA")
    extract_directory = f"{input_dir}extracts/"

    if needed_files_exists(input_dir):
        print("    * Needed Files Already Exist")
    else:
        if extracted_files_exists(extract_directory):
            print("    * Extract Already Exist")
        else:
            create_directory(extract_directory)
            print("    * Fetching CRDC Data From Public Website (34MB)")
            zip_file_name = get_filename_from_url(CRDC_DATA_URL)
            zip_file_name = fetch_file(CRDC_DATA_URL, extract_directory,
                                       zip_file_name)

            print("    * Extracting Zip At ",
                  extract_directory + zip_file_name)
            unzip(extract_directory + zip_file_name, extract_directory)

        print("    * Moving Files In Place")
        # Pair each extracted CRDC file with its simplified destination name.
        formatted_files_list = [
            {"src_path": entry["extracted_path"],
             "dest_path": entry["needed_file_name"]}
            for entry in CRDC_FILES
        ]
        rename_files(formatted_files_list, extract_directory, input_dir)

        print("    * Cleaning Up")
        remove_directory(extract_directory)

    print("    * Setup Complete")
示例#42
0
def optimise_parameters():
    """Sweep pyramid level, rotation step and Gaussian settings, saving the
    template-matching results of every combination under RESULTS_DIR."""
    pyramid_levels = [3, 4, 5]
    rotations = [[angle for angle in range(0, 360, step)]
                 for step in range(20, 35, 5)]
    gaussian_parameters = [[5, 5, 15]]

    utils.create_directory(config.RESULTS_DIR)

    for level in pyramid_levels:
        for rots in rotations:
            for g_n, gaussian in enumerate(gaussian_parameters):
                step_size = rots[1] - rots[0]
                row, col, dev = gaussian
                g = utils.gaussian_kernel(row, col, dev)
                # Start each run from a clean template directory.
                utils.delete_directory(config.TEMPLATE_OUTPUT_DIR)
                print(
                    'training rotation {} level {} gaussian {}-{}-{}'.format(
                        step_size, level, row, col, dev), rots, level)

                start = time.time()
                template_matching(config.TRAINING_DIR,
                                  config.TEMPLATE_OUTPUT_DIR, level, rots, g)
                out_dir = config.RESULTS_DIR + 'level{}-rot{}-g-{}-{}-{}/'.format(
                    level, step_size, row, col, dev)
                utils.create_directory(out_dir)
                print('testing', rots, level)
                images = test_template_matching(config.TESTING_DIR,
                                                config.TEMPLATE_OUTPUT_DIR)
                elapsed = time.time() - start
                utils.write_to_file(out_dir + 'time.txt', elapsed)

                # Persist one annotated image per test sample.
                for idx, im in enumerate(images):
                    cv2.imwrite(out_dir + '{}.png'.format(idx), im)

    return True
示例#43
0
def template_matching(data_dir,
                      template_dir,
                      pyramid_depth=4,
                      rotations=None,
                      gaussian=None):
    """Generate rotated, multi-scale template images for every input image."""
    # Defaults: 30-degree rotation steps and a 5x5 Gaussian kernel.
    if rotations is None:
        rotations = list(range(0, 360, 30))
    if gaussian is None:
        gaussian = utils.gaussian_kernel(5, 5, 15)

    for image_name in utils.get_files(data_dir, extension='.png'):
        raw = cv2.imread(data_dir + image_name + '.png',
                         cv2.IMREAD_UNCHANGED)
        filtered = tm.pre_process_image(raw)

        # One Gaussian pyramid per image; one level per scale.
        pyramid = tm.create_gaussian_pyramid(filtered, gaussian,
                                             pyramid_depth)

        # Templates for each class live in their own subdirectory.
        image_class = tm.get_class_name(image_name)
        class_dir = template_dir + image_class + '/'
        utils.create_directory(class_dir)

        # Emit every (scale, rotation) variant as a PNG template.
        for scale_index, scaled_image in enumerate(pyramid):
            for angle in rotations:
                rotated = tm.rotate_image(scaled_image, angle)
                file_name = (image_class + "-level" + str(scale_index) +
                             "-rotation" + str(angle) + ".png")
                cv2.imwrite(class_dir + file_name, rotated)

    return True
示例#44
0
    def __init__(self, **kwargs):
        """Set up the workbook, cell styles and the report output directory."""
        super(ConformanceReportWriter, self).__init__(**kwargs)

        # Pre-built cell styles used throughout the sheet.
        self.style_title = xlwt.easyxf(
            'font: height 350, bold on; align: vert centre, horiz center;')
        self.style_bold = xlwt.easyxf('font: bold on, height 260;')
        self.style_summary = xlwt.easyxf('font: height 220;')
        self.style_center = xlwt.easyxf('align: vert centre, horiz center;')

        # Report parameters supplied by the caller.
        self.user = kwargs.pop('user')
        self.conformance_report = kwargs.pop('conformance_report')
        self.locale_datetime = kwargs.pop('locale_datetime')
        self.include_host_packages = kwargs.pop('include_host_packages')
        self.exclude_conforming_hosts = kwargs.pop('exclude_conforming_hosts')

        self.wb = xlwt.Workbook()
        self.ws = self.wb.add_sheet('Conformance Report')
        self.ws.set_portrait(False)  # landscape orientation

        # Per-user scratch directory that will receive the report file.
        temp_user_dir = create_temp_user_directory(self.user.username)
        self.output_file_directory = os.path.normpath(
            os.path.join(temp_user_dir, "conformance_report"))
        create_directory(self.output_file_directory)
        make_file_writable(self.output_file_directory)

        self.row = 0  # next worksheet row to write
示例#45
0
    def reconstruct(self, res=None):
        """Assemble the grid from the individual block files.

        If more than one resolution is desired, the largest resolution is
        used and the end result is scaled down. The output files will have
        the size denoted in the filename.

        :param res: list of output resolutions in pixels; defaults to [16].
            (Replaces the old mutable default argument ``res=[16]``.)
        """
        if res is None:
            res = [16]
        res = list(set(res))  # Squish duplicates.
        if len(res) == 0:
            self.error_handler("No resolutions selected.")
        res.sort(reverse=True)

        resp = utils.create_directory(self.outdir)
        # resp is True on success, otherwise an error value -- TODO confirm
        if resp != True:
            utils.raise_error(resp)

        # Render at the largest requested size; smaller outputs are
        # produced later by downscaling.
        self.block_size = res.pop(0)
        self.output = Image.new(
            "RGBA", (self.block_size * self.grid_width, self.block_size * self.grid_height), (0, 0, 0, 0)
        )

        # Paste every known block; empty grid cells stay transparent.
        for i in xrange(self.grid_width):
            for j in xrange(self.grid_height):
                if (i, j) in self.map:
                    block = self.map[(i, j)]
                    self.read_and_paste_block(i, j, block)

        # Save the primary output size.
        name_base = os.path.split(self.img_path)[1]
        ext = "_" + str(self.block_size) + "x" + str(self.block_size) + ".png"
        outfile = self.outdir + os.sep + name_base.replace(".png", ext)
        self.output.save(outfile)

        # Save additional output sizes via bicubic downscale.
        for size in res:
            ext = "_" + str(size) + "x" + str(size) + ".png"
            outfile = self.outdir + os.sep + name_base.replace(".png", ext)
            dims = (size * self.grid_width, size * self.grid_height)
            img = self.output.resize(dims, Image.BICUBIC)
            img.save(outfile)
示例#46
0
文件: train-lstm.py 项目: chunmun/fyp
# CLI options controlling how word embeddings are initialized.
parser.add_argument('--fixed-embeddings',\
        help='Loads the corresponding embeddings from the given word embedding file')

parser.add_argument('--learn-embeddings',\
        help='Loads the corresponding embeddings from that only exists in the test sentence')

if __name__=="__main__":
    args = parser.parse_args()
    # Metadata bundles the parsed args with whichever embeddings file was given.
    md = Metadata(args, args.filename, args.fixed_embeddings or args.learn_embeddings)

    # Encode the run configuration into the model directory name so runs
    # with different hyper-parameters never clobber each other.
    varlist = list(map(str, [os.path.basename(args.filename), os.path.basename(args.validation_filename), \
        args.iterations, args.hidden, args.l2, args.dropout_rare, args.dropout,\
        args.fixed_embeddings is not None, args.learn_embeddings is not None]))

    directory_model = 'Model_' + '_'.join(varlist)
    utils.create_directory(directory_model)

    if args.load_reader:
        print('... loading reader')
        # NOTE(review): unpickling is only safe for trusted, locally-written files.
        with open(os.path.join(directory_model, 'reader.pkl'), 'rb') as f:
            reader = pickle.load(f)
    else:
        print('... Generating new reader')
        reader = Reader(md, minimum_occurrence=2)
        #reader.save(directory_model)

    # Generate the training set
    num_sentences = len(reader.sentences)
    num_words = len(reader.word_dict)
    num_tags = len(reader.tag_dict)
示例#47
0
def get_config_conversion_path():
    """Return the current user's config-conversion directory, creating it if needed."""
    user_dir = create_temp_user_directory(current_user.username)
    path = os.path.normpath(os.path.join(user_dir, "config_conversion"))
    create_directory(path)
    return path
示例#48
0
# By importing models here, it forces creation of tables in the database for a new installation.
# This will prevent gunicorn workers from trying to create the database tables all at the same time.
# See csmserver launch script
import models

from utils import create_directory
from constants import get_autlogs_directory, get_repository_directory, get_temp_directory

# Create the necessary supporting directories at import time so the rest
# of the application can assume they exist.
create_directory(get_autlogs_directory())
create_directory(get_repository_directory())
create_directory(get_temp_directory())
示例#49
0
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# =============================================================================
from models import initialize
from models import SystemVersion 
from sqlalchemy import inspect
from database import DBSession, CURRENT_SCHEMA_VERSION, engine

from utils import create_directory, is_ldap_supported
from constants import get_log_directory, get_repository_directory, get_temp_directory
from schema.loader import get_schema_migrate_class

import traceback

# Create the necessary supporting directories before anything attempts to
# write logs or repository files.
create_directory(get_log_directory())
create_directory(get_repository_directory())
create_directory(get_temp_directory())


def init():
    if not is_ldap_supported():
        print('LDAP authentication is not supported because it has not been installed.')

    db_session = DBSession()
    system_version = SystemVersion.get(db_session)

    # Handles database schema migration starting from the next schema version
    for version in range(system_version.schema_version + 1, CURRENT_SCHEMA_VERSION + 1):
        handler_class = get_schema_migrate_class(version)
        if handler_class is not None: