Example #1
def co_savings(cfg: Dict[str, Any]) -> Iterator[None]:
    """
    Saves vsz, exports images and saves hdf5 log
    Corutine must receive:
        veusze: Veusz embedded object
        log: dict with parameters: 'out_name' - log's index, 'out_vsz_full' - vsz file name to save

    log parameters will be saved to pandas dataframe end then to hdf5 log cfg['program']['log'])[0]+'.h5'
    """
    with pd.HDFStore(Path(cfg['program']['log']).with_suffix('.h5'),
                     mode='a') as storeLog:
        veusze = None
        if __name__ != '__main__':
            path_prev = os_getcwd()
            os_chdir(cfg['out']['path'].parent)
        print('Saving to {}'.format(cfg['out']['path'].parent.absolute()))
        try:
            while True:
                veusze, log = yield ()
                if not cfg['out']['b_images_only']:
                    veusze.Save(str(log['out_vsz_full']))
                    # Save vsz modification date
                    log['fileChangeTime'] = datetime.fromtimestamp(
                        Path(log['out_vsz_full']).stat().st_mtime),  # trailing comma makes this a 1-tuple (list-like for from_records)
                    dfLog = pd.DataFrame.from_records(
                        log,
                        exclude=['out_name', 'out_vsz_full'],
                        index=[log['out_name']])
                    storeLog.append(Path(cfg['out']['path']).name,
                                    dfLog,
                                    data_columns=True,
                                    expectedrows=cfg['in']['nfiles'],
                                    index=False,
                                    min_itemsize={'index': 30})
                if cfg['async']['loop']:
                    try:  # yield from     asyncio.ensure_future(
                        # asyncio.wait_for(, cfg['async']['export_timeout_s'], loop=cfg['async']['loop'])
                        b = cfg['async']['loop'].run_until_complete(
                            export_images_timed(veusze, cfg,
                                                '#' + log['out_name']))
                    except asyncio.TimeoutError:
                        l.warning('cannot export in time')
                else:
                    export_images(veusze, cfg['out'], '#' + log['out_name'])
        except GeneratorExit:
            print('Ok>')
        finally:
            if __name__ != '__main__':
                os_chdir(path_prev)
            if veusze and cfg['program']['return'] != '<embedded_object>':
                veusze.Close()
                l.info('closing Veusz embedded object')
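The saving coroutine above follows the standard generator-coroutine protocol: prime it with send(None), push one (veusze, log) pair per send(), and close() to reach the GeneratorExit branch. A minimal runnable sketch of that protocol with a toy stand-in (not the original function; the real cfg, veusze and log objects come from the surrounding program):

def co_toy():
    """Toy coroutine mirroring co_savings' send/close protocol."""
    try:
        while True:
            veusze, log = yield ()  # receive one work item per send()
            print('saving', veusze, log)
    except GeneratorExit:
        print('Ok>')  # cleanup branch, like in co_savings

cor = co_toy()
cor.send(None)                  # prime: advance to the first `yield`
cor.send(('embedded', 'log1'))  # deliver one (veusze, log) pair
cor.close()                     # raises GeneratorExit inside -> prints 'Ok>'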
Example #2
from contextlib import contextmanager
from os import chdir as os_chdir, getcwd


@contextmanager
def chdir(target):
    """Context manager for executing some code within a different
    directory after which the current working directory will be set
    back to what it was before.

    Args:
        target (str): path to the directory to change into.
    """
    current = getcwd()
    try:
        os_chdir(target)
        yield target
    finally:
        os_chdir(current)
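Typical usage of the context manager above (runnable as-is with the imports added; tempfile is used only to pick a directory that exists everywhere):

import tempfile

with chdir(tempfile.gettempdir()):
    print('inside:', getcwd())   # temporary directory
print('after:', getcwd())        # original directory restored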
Example #3
def chdir(path=None):
    print('===== path_ops/chdir =====')

    if path is None:
        raise ValueError("No path is received")

    # Parse string to path object
    path = dir_check(path, dir_create=False, return_type='Path')
    print(f'target_dir = {path}')
    path = pathlib.PurePath(path)

    os_chdir(path)
    cwd = pathlib.Path.cwd()
    print(f'cwd = {cwd}')
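dir_check is not shown in this snippet; a minimal stand-in, assuming it only validates (and optionally creates) the directory and returns it in the requested type:

import pathlib
from os import chdir as os_chdir

def dir_check(path, dir_create=False, return_type='Path'):
    """Hypothetical helper: validate a directory path, optionally create it."""
    p = pathlib.Path(path)
    if dir_create:
        p.mkdir(parents=True, exist_ok=True)
    elif not p.is_dir():
        raise NotADirectoryError(str(p))
    return p if return_type == 'Path' else str(p)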
Example #4
def cleStart():
    ## base
    from os import getcwd as os_getcwd, chdir as os_chdir
    from json import load as j_loads
    v_lst = ("p2p_packet_info.txt", "packet_info.txt", "profile.txt", "stdout",
             "syslog", "syserr", "usage.txt", "VERSION.txt", "DEV_LOG.log",
             "mob_count", "*.core")
    szPWD = os_getcwd()
    ## clear files from alog
    with open("clear.list", "r") as fList:
        mList = j_loads(fList)
    for dic1 in mList:
        # goto alog path
        # print dic1["path"]
        os_chdir(dic1["path"])
        # clean files
        fShell("cat /dev/null > PTS")
        fShell("rm -rf log/* cores/*")
        # goto base again
        os_chdir(szPWD)
    ## clean other logs
    with open("start.list", "r") as fList:
        mList = j_loads(fList)
    for dic1 in mList:
        # goto alog path
        # print dic1["path"]
        os_chdir(dic1["path"])
        fShell("echo --- delete inside '%s' ---" % dic1["path"])
        fShell("rm -fv %s" % " ".join(v_lst))
        # goto base again
        os_chdir(szPWD)
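fShell is not defined in this snippet; a minimal stand-in, assuming it simply runs a shell command (the original may add logging or error handling):

from os import system as os_system

def fShell(cmd):
    """Hypothetical helper: run a shell command, return its exit status."""
    return os_system(cmd)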
Example #5
from contextlib import contextmanager
from os import chdir as os_chdir, getcwd


@contextmanager
def chdir(target):
    """Context manager for executing code within a different directory.
    After the execution, the current working directory will be set back to its initial value.

    Args:
        target (str): path to the directory to change into.

    """
    current = getcwd()
    try:
        os_chdir(target)
        yield target
    finally:
        os_chdir(current)
Example #6
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 30 18:13:47 2021

@author: Meghana
"""
from os import path as os_path, chdir as os_chdir

os_chdir(os_path.dirname(os_path.abspath(__file__)))
from sys import path as sys_path

# insert at 1, 0 is the script path (or '' in REPL)
sys_path.insert(1, './functions_py3/')
from jaccard_coeff import jaccard_coeff
from numpy import percentile as np_percentile
from read_complexes import preprocess_complexes
#from pickle import load as pickle_load
import networkx


def NA_threshold(set1, set2):
    ls1 = len(set1)
    ls2 = len(set2)
    if ls1 == 0 and ls2 == 0:
        return 1
    if ls1 == 0 or ls2 == 0:
        return 0  # avoid ZeroDivisionError when exactly one set is empty
    inter = float(len(set1.intersection(set2)))

    a = inter / ls1
    b = inter / ls2
    return max(a, b)
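A quick check of the overlap score above:

s1, s2 = {1, 2, 3}, {2, 3, 4}
print(NA_threshold(s1, s2))  # intersection has 2 elements -> max(2/3, 2/3) ≈ 0.667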
Example #7
	def FromFile(cls, filePath):
		if (not filePath.exists()): raise PyIpxactException("File '{0!s}' not found.".format(filePath)) from FileNotFoundError(str(filePath))
		
		try:
			with filePath.open(encoding="utf-8") as fileHandle:
				content = fileHandle.read()
				content = bytes(bytearray(content, encoding='utf-8'))
		except OSError as ex:
			raise PyIpxactException("Couldn't open '{0!s}'.".format(filePath)) from ex
		
		os_chdir("lib/schema")
		
		schemaPath = Path("index.xsd")
		try:
			with schemaPath.open(encoding="utf-8") as fileHandle:
				schema = fileHandle.read()
				schema = bytes(bytearray(schema, encoding='utf-8'))
		except OSError as ex:
			raise PyIpxactException("Couldn't open '{0!s}'.".format(schemaPath)) from ex
		
		xmlParser = etree.XMLParser(remove_blank_text=True, encoding="utf-8")
		
		schemaRoot =  etree.XML(schema, xmlParser)
		schemaTree =  etree.ElementTree(schemaRoot)
		xmlschema =   etree.XMLSchema(schemaTree)
		root =        etree.XML(content, xmlParser)
		rootTag =     etree.QName(root.tag)
		
		if (not xmlschema.validate(root)):
			raise PyIpxactException("The input IP-XACT file is not valid.")
		
		if (rootTag.namespace not in __URI_MAP__):
			raise PyIpxactException("The input IP-XACT file uses an unsupported namespace: '{0}'.".format(rootTag.namespace))
		
		if (rootTag.localname != "catalog"):
			raise PyIpxactException("The input IP-XACT file is not a catalog file.")
		
		print("==" * 20)
		
		items = []
		for rootElements in root:
			element = etree.QName(rootElements)
			if (element.localname == "vendor"):
				vendor = rootElements.text
			elif (element.localname == "library"):
				library = rootElements.text
			elif (element.localname == "name"):
				name = rootElements.text
			elif (element.localname == "version"):
				version = rootElements.text
			elif (element.localname == "description"):
				description = rootElements.text
			elif (element.localname == "catalogs"):
				for ipxactFileElement in rootElements:
					items.append(IpxactFile.FromXml(ipxactFileElement))
			else:
				raise PyIpxactException("Unsupported tag '{0}' at root-level.".format(element.localname))
			
		print("==" * 20)
		
		vlnv =    Vlnv(vendor=vendor, library=library, name=name, version=version)
		catalog = cls(vlnv, description=description)
		for item in items:
			catalog.AddItem(item)
			
		return catalog
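Because FromFile changes the working directory (the os_chdir("lib/schema") call) and never restores it, a caller may want a restoring wrapper like the context manager of Example #2. A sketch; Catalog and the file path are assumptions, not from the original:

from contextlib import contextmanager
from os import chdir as os_chdir, getcwd
from pathlib import Path

@contextmanager
def restore_cwd():
    """Restore the current working directory on exit."""
    current = getcwd()
    try:
        yield
    finally:
        os_chdir(current)

# Hypothetical call site, assuming Catalog is the class defining FromFile:
# with restore_cwd():
#     catalog = Catalog.FromFile(Path("designs/catalog.xml"))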
Example #8
        f'/{device}/logRuns',  # str: name of log table - table with intervals:
        'pattern_path': path_cruise / device / '~pattern~.vsz'
    }
    f_row = lambda r: [
        '{Index:%y%m%d_%H%M}-{DateEnd:%H%M}.vsz'.format_map(r),
        bytes(
            "time_range = ['{:%Y-%m-%dT%H:%M:%S}', '{:%Y-%m-%dT%H:%M:%S}']".
            format(r['Index'], r['DateEnd'] + pd.Timedelta(300, "s")), 'utf-8')
    ]
    pattern_code = cfg_in['pattern_path'].read_bytes()  #encoding='utf-8'

    from os import getcwd as os_getcwd, chdir as os_chdir
    path_prev = os_getcwd()
    argv_prev = sys.argv

    os_chdir(cfg_in['pattern_path'].parent)
    for filename, str_expr in h5log_names_gen(cfg_in, f_row):
        path_vsz = cfg_in['pattern_path'].with_name(filename)
        path_vsz.write_bytes(
            re.sub(rb'^([^\n]+)', str_expr, pattern_code, count=1))

    veuszPropagate.main([
        'ini/veuszPropagate.ini',
        '--path',
        str(cfg_in['pattern_path'].with_name(
            '??????_????-????.vsz')),  #path_db),
        '--pattern_path',
        f"{cfg_in['pattern_path']}_",  # here used to auto get export dir only. must not be not existed file path
        #'--table_log', f'/{device}/logRuns',
        #'--add_custom_list', f'{device_veusz_prefix}USE_time_search_runs',  # 'i3_USE_timeRange',
        # '--add_custom_expressions',
Example #9
def main(new_arg=None, veusze=None, **kwargs):
    """
    Initialises configuration and runs or returns routines
    cfg:
        ['program']['log'],
        'out'
        'in'
        'async'
    globals:
        load_vsz
        l

    :param new_arg:
    :param veusze: used to reuse veusz embedded object (thus to not leak memory)
    :return:
    """
    global l, load_vsz
    cfg = cfg_from_args(my_argparser(), new_arg, **kwargs)
    if not cfg or not cfg['program'].get('return'):
        print('Cannot initialise')
        return cfg
    elif cfg['program']['return'] == '<cfg_from_args>':  # to help testing
        return cfg

    l = init_logging(logging, None, cfg['program']['log'],
                     cfg['program']['verbose'])
    cfg['program']['log'] = l.root.handlers[
        0].baseFilename  # synchronize obtained absolute file name

    print('\n' + this_prog_basename(__file__), 'started', end=' ')
    __name__ = '__main__'  # indicate to other functions that they are called from main (note: without a global declaration this rebinds only a local name)

    if cfg['out'].get('paths'):
        if not cfg['out']['b_images_only']:
            raise NotImplementedError(
                'Provided "out" paths, but not in "b_images_only" mode!')
        cfg['out']['nfiles'] = len(cfg['out']['paths'])
        cfg['out']['path'] = cfg['out']['paths'][0]
        print(
            end=f"\n- {cfg['out']['nfiles']} output files to export images...")
        pass
    else:
        if cfg['out']['b_images_only']:
            print(
                'in images only mode. Output pattern: ')  # todo Export path: '
        else:
            print('. Output pattern and Data: ')

        try:
            # Using cfg['out'] to store pattern information
            if not Path(cfg['in']['pattern_path']).is_absolute():
                cfg['in']['pattern_path'] = Path(cfg['in']['path']).with_name(
                    str(cfg['in']['pattern_path']))
            cfg['out']['path'] = cfg['in']['pattern_path']
            cfg['out']['paths'], cfg['out']['nfiles'], cfg['out'][
                'path'] = init_file_names(**cfg['out'], b_interact=False)
        except Ex_nothing_done as e:
            if not cfg['out']['b_images_only']:
                l.warning(
                    f'{e.message} - no pattern. Specify it or use "b_images_only" mode!'
                )
                return  # or raise FileNotFoundError?

    if (cfg['out']['b_images_only'] and cfg['out']['paths']):
        cfg['in']['paths'] = cfg['out']['paths']  # have all we need to export
    else:
        try:
            cfg['in']['paths'], cfg['in']['nfiles'], cfg['in'][
                'path'] = init_file_names(**cfg['in'],
                                          b_interact=cfg['program']
                                          ['b_interact'])
        except Ex_nothing_done as e:
            print(e.message)
            return  # or raise FileNotFoundError?
        except TypeError:  # expected str, bytes or os.PathLike object, not NoneType
            # cfg['in']['path'] is None. Maybe it is not needed
            cfg['in']['paths'] = [cfg['in']['pattern_path']
                                  ]  # dummy for compatibility
            cfg['in']['nfiles'] = 1

    cfg['out']['export_dir'] = dir_from_cfg(cfg['out']['path'].parent,
                                            cfg['out']['export_dir'])

    if 'restore_config' in cfg['program']['before_next']:
        cfg['in_saved'] = cfg['in'].copy()
    # Next is commented because reloading is Ok: no need to Close()
    # if cfg['out']['b_images_only'] and not 'Close()' in cfg['program']['before_next']:
    #     cfg['program']['before_next'].append(
    #         'Close()')  # usually we need to load new file for export (not only modify previous file)
    if cfg['program']['export_timeout_s'] and export_images_timed:
        cfg['async'] = {
            'loop': asyncio.get_event_loop(),
            'export_timeout_s': cfg['program']['export_timeout_s']
        }
    else:
        cfg['async'] = {'loop': None}

    load_vsz = load_vsz_closure(cfg['program']['veusz_path'],
                                cfg['program']['load_timeout_s'],
                                cfg['program']['b_execute_vsz'])
    cfg['load_vsz'] = load_vsz
    cfg['co'] = {}
    if cfg['in']['table_log'] and cfg['in']['path'].suffix == '.h5' and not (
            cfg['out']['b_images_only'] and len(cfg['in']['paths']) > 1):
        # load data by ranges from table log rows
        cfg['in']['db_path'] = cfg['in']['path']
        in_fulls = h5log_names_gen(cfg['in'])
    elif cfg['in']['tables']:
        # tables instead files
        in_fulls = ge_names_from_hdf5_paths(cfg)
    else:  # switch to use found vsz as source if need only export images (even with database source)
        in_fulls = ge_names(cfg)

    cor_savings = co_savings(cfg)
    cor_savings.send(None)
    nfiles = 0
    try:  # if True:
        path_prev = os_getcwd()
        os_chdir(cfg['out']['path'].parent)
        if cfg['program']['return'] == '<corutines_in_cfg>':
            cfg['co']['savings'] = cor_savings
            cfg['co']['gen_veusz_and_logs'] = load_to_veusz(in_fulls, cfg)
            cfg['co']['send_data'] = co_send_data(load_to_veusz, cfg,
                                                  cor_savings)
            return cfg  # return with link to generator function
        elif cfg['in'].get('data_yield_prefix'):
            # Cycle with obtaining Veusz data
            cor_send_data = co_send_data(load_to_veusz, cfg, cor_savings)  # instantiate the coroutine (the original called .send on the function itself)
            cfgin_update = None
            while True:  # for vsz_data, log in cor_send_data.send(cfgin_update):
                try:
                    vsz_data, log = cor_send_data.send(cfgin_update)
                    nfiles += 1
                except (GeneratorExit, StopIteration, Ex_nothing_done):
                    break
                if 'f_custom_in_cycle' in cfg['program']:
                    cfgin_update = cfg['program']['f_custom_in_cycle'](
                        vsz_data, log)
        else:
            # Cycle without obtaining Veusz data (or implemented by user's cfg['program']['f_custom_in_cycle'])
            for veusze, log in load_to_veusz(in_fulls, cfg, veusze):
                file_name_r = Path(log['out_vsz_full']).relative_to(
                    cfg['out']['path'].parent)
                if cfg['program'].get('f_custom_in_cycle'):
                    cfgin_update = cfg['program']['f_custom_in_cycle'](veusze,
                                                                       log)
                    veusze_commands(veusze, cfgin_update, file_name_r)
                cor_savings.send((veusze, log))
                nfiles += 1
            cor_savings.close()
            if cfg['program']['return'] != '<embedded_object>':
                veusze = None  # to note that it is closed in cor_savings.close()
        print(f'{nfiles} processed. ok>')

        pass
    except Exception as e:
        l.exception('Not good')
        return  # or raise FileNotFoundError?
    finally:
        if cfg['async']['loop']:
            cfg['async']['loop'].close()
        os_chdir(path_prev)
        if veusze and cfg['program']['return'] == '<end>':
            veusze.Close()
            veusze.WaitForClose()
            veusze = None
        elif cfg['program']['return'] == '<embedded_object>':
            cfg['veusze'] = veusze
            return cfg
Example #10
    def load_vsz(
        vsz: Union[str, PurePath, None] = None,
        veusze: Optional[str] = None,
        prefix: Optional[str] = None,
        suffix_prior: Optional[str] = '_fbot'
    ) -> Tuple[veusz.Embedded, Optional[Dict[str, Any]]]:
        """
        Load (create) specifid data from '*.vsz' files
        :param vsz: full name of vsz or None. If not None and such file not found then create it
        :param veusze: veusz.Embedded object or None - will be created if None else reused
        :param prefix: only data started with this prefix will be loaded
        :param suffix_prior: high priority names suffix, removes other version of data if starts same but with no such suffix (see veusz_data())
        :return: (veusze, vsz_data):
            - veusze - veusz.Embedded object
            - vsz_data - loaded data if prefix is not None else None
        """
        if vsz is None:
            file_exists = False
            if veusze is None:
                title = 'empty'
                l.debug('new embedded window')
            else:
                l.debug('keep same embedded window')
        else:  # isinstance(vsz, (str, PurePath)):
            vsz = Path(vsz)
            file_exists = vsz.is_file()
            if file_exists:
                l.debug(f'loading found vsz: {vsz}')
                title = f'{vsz} - was found'
            else:
                l.debug(f'creating vsz: {vsz}')
                title = f'{vsz} - was created'

        if veusze is None:
            # Veusz embedded window construction

            # Save right path in veusz.Embedded (closure)
            if __name__ != '__main__':  # if this haven't done in main()
                path_prev = os_getcwd()  # to recover
                os_chdir(
                    vsz.parent
                )  # allows veusze.Load(path) to work if path is relative or relative paths are used in vsz
            veusze = veusz.Embedded(title)  # , hidden=True
            # veusze.EnableToolbar()
            # veusze.Zoom('page')

            if __name__ != '__main__':
                os_chdir(path_prev)  # recover

        if file_exists:
            if not b_execute_vsz:
                if load_timeout_s:
                    # veusze.Load(str(vsz.name)) with timeout:  # not tried veusze.serv_socket.settimeout(60)
                    SingletonTimeOut.run(partial(veusze.Load, str(vsz.name)),
                                         load_timeout_s)
                    sleep(1)
                else:
                    veusze.Load(vsz.name)
            else:

                def load_by_exec(vsz, veusze):
                    """
                    Unsafe replasement for veusze.Load(vsz) to add variable argv
                    Runs any python commands before 1st Title command of
                    :param vsz:
                    :return:
                    """
                    with vsz.open(encoding='utf-8') as v:
                        # combine pure Python lines
                        lines = []
                        for line in v:
                            if line[:2].istitle():
                                break
                            lines.append(line)

                        # dangerous for unknown vsz but we allow 1 time at beginning of file: to use for known vsz
                        loc_exclude = locals().copy()
                        del loc_exclude['veusze']
                        loc = {
                            'argv': ['veusz.exe', str(vsz)],
                            'BASENAME': (lambda: vsz.stem)
                        }
                        # match = re.match
                        exec('\n'.join(lines), {}, loc)
                        loc.update(locals().copy())
                        for k in loc_exclude.keys():
                            del loc[k]

                        basename_result = "'{}'".format(loc['BASENAME']())
                        # eval Veusz commands
                        eval(f"""veusze.{line}""")  # execute the first Veusz command (the line that ended the Python header)
                        for line in v:
                            if 'BASENAME()' in line:
                                line = line.replace(
                                    'BASENAME()', basename_result
                                )  # only this helps in Custom Definitions expressions

                            # cmd, params = line.split('(', maxsplit=1)
                            eval(f"""veusze.{line}""", {},
                                 loc)  # , {"__builtins__": {}}
                            # from ast import literal_eval
                            # params_dict = literal_eval(params.rsplit(')', maxsplit=1)[0])
                            # getattr(veusze, cmd)(**params_dict)
                    return

                load_by_exec(vsz, veusze)

        if prefix is None:
            return veusze, None
        return veusze, veusz_data(veusze, prefix, suffix_prior)
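A hedged usage sketch of the closure above; it assumes load_vsz was produced by load_vsz_closure(veusz_path, load_timeout_s, b_execute_vsz) as in Example #9, and that the vsz file and a local Veusz installation exist:

# Hypothetical driver; 'runs/200101_0000.vsz' and the 'USE' prefix are illustrative only.
veusze, vsz_data = load_vsz('runs/200101_0000.vsz', prefix='USE')  # load or create
try:
    print(list(vsz_data))  # names of loaded datasets starting with 'USE'
finally:
    veusze.Close()  # close the embedded window when done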
Example #11
def main(new_arg=None, **kwargs):
    """

    :param new_arg: list of strings, command line arguments
    :kwargs: dicts of dictcts (for each ini section): specified values overwrites ini values
    """

    # global l
    cfg = cfg_from_args(my_argparser(), new_arg, **kwargs)
    if not cfg['program']:
        return  # usually error of unrecognized arguments displayed
    cfg['in']['db_coefs'] = Path(cfg['in']['db_coefs'])
    for path_field in ['db_coefs', 'path_cruise']:
        if not cfg['in'][path_field].is_absolute():
            cfg['in'][path_field] = (
                cfg['in']['cfgFile'].parent / cfg['in'][path_field]
            ).resolve().absolute()  # cfg['in']['cfgFile'].parent /

    def constant_factory(val):
        def default_val():
            return val

        return default_val

    for lim in ('min_date', 'max_date'):
        # convert keys to int because they must be comparable to probes_int_list (for command line arguments keys are always strings; in yaml you can set string or int)
        _ = {int(k): v for k, v in cfg['filter'][lim].items()}
        cfg['filter'][lim] = defaultdict(constant_factory(_.get(0)), _)

    l = init_logging(logging, None, None, 'INFO')
    #l = init_logging(logging, None, cfg['program']['log'], cfg['program']['verbose'])

    if True:  # False. Experimental speedup but takes memory
        from dask.cache import Cache
        cache = Cache(2e9)  # Leverage two gigabytes of memory
        cache.register()  # Turn cache on globally

    #if __debug__:
    # # because there was errors on debug when default scheduler used
    # cfg['program']['dask_scheduler'] = 'synchronous'

    if cfg['program']['dask_scheduler']:
        if cfg['program']['dask_scheduler'] == 'distributed':
            from dask.distributed import Client
            # cluster = dask.distributed.LocalCluster(n_workers=2, threads_per_worker=1, memory_limit="5.5Gb")
            client = Client(processes=False)
            # navigate to http://localhost:8787/status to see the diagnostic dashboard if you have Bokeh installed
            # processes=False: avoid inter-worker communication for computations that release the GIL (numpy, da.array)  # without it there was an error
        else:
            if cfg['program']['dask_scheduler'] == 'synchronous':
                l.warning('using "synchronous" scheduler for debugging')
            import dask
            dask.config.set(scheduler=cfg['program']['dask_scheduler'])

    # Run steps :
    st.start = cfg['program']['step_start']
    st.end = cfg['program']['step_end']
    st.go = True

    if not cfg['out'][
            'db_name']:  # set name from 'path_cruise' name, or its parent, if it has digits at start; "*inclinometer*" has priority for the name
        for p in (lambda p: [p, p.parent])(cfg['in']['path_cruise']):
            m = re.match('(^[\d_]*).*', p.name)
            if m:
                break
        cfg['out']['db_name'] = f"{m.group(1).strip('_')}incl.h5"

    dir_incl = next((d for d in cfg['in']['path_cruise'].glob('*inclinometer*')
                     if d.is_dir()), cfg['in']['path_cruise'])
    db_path = dir_incl / '_raw' / cfg['out']['db_name']

    # ---------------------------------------------------------------------------------------------
    # def fs(probe, name):
    #     if 'w' in name.lower():  # Baranov's wavegauge electronic
    #         return 10  # 5
    #     return 5
    # if probe < 20 or probe in [23, 29, 30, 32, 33]:  # 30 [4, 11, 5, 12] + [1, 7, 13, 30]
    #     return 5
    # if probe in [21, 25, 26] + list(range(28, 35)):
    #     return 8.2
    # return 4.8

    def datetime64_str(time_str: Optional[str] = None) -> np.ndarray:
        """
        Reformat time_str to ISO 8601 or to 'NaT'. Used here for input to funcs that convert str to numpy.datetime64
        :param time_str: May be 'NaT'
        :return: ndarray of strings (tested for 1 element only) formatted by numpy.
        """
        return np.datetime_as_string(np.datetime64(time_str, 's'))

    probes = cfg['in']['probes'] or range(
        1, 41)  # sets default range, specify your values before line ---
    raw_root, probe_is_incl = re.subn('INCL_?', 'INKL_',
                                      cfg['in']['probes_prefix'].upper())

    # some parameters that depends of probe type (indicated by probes_prefix)
    p_type = defaultdict(
        # baranov's format
        constant_factory({
            'correct_fun':
            partial(correct_txt,
                    mod_file_name=mod_incl_name,
                    sub_str_list=[
                        b'^\r?(?P<use>20\d{2}(\t\d{1,2}){5}(\t\d{5}){8}).*',
                        b'^.+'
                    ]),
            'fs':
            10,
            'format':
            'Baranov',
        }),
        {
            (lambda x: x if x.startswith('incl') else 'incl')(cfg['in']['probes_prefix']):
            {
                'correct_fun':
                partial(
                    correct_txt,
                    mod_file_name=mod_incl_name,
                    sub_str_list=[
                        b'^(?P<use>20\d{2}(,\d{1,2}){5}(,\-?\d{1,6}){6}(,\d{1,2}\.\d{2})(,\-?\d{1,3}\.\d{2})).*',
                        b'^.+'
                    ]),
                'fs':
                5,
                'format':
                'Kondrashov',
            },
            'voln': {
                'correct_fun':
                partial(
                    correct_txt,
                    mod_file_name=mod_incl_name,
                    sub_str_list=[
                        b'^(?P<use>20\d{2}(,\d{1,2}){5}(,\-?\d{1,8})(,\-?\d{1,2}\.\d{2}){2}).*',
                        b'^.+'
                    ]),
                'fs':
                5,
                #'tbl_prefix': 'w',
                'format':
                'Kondrashov',
            }
        })

    if st(1, 'Save inclinometer or wavegage data from ASCII to HDF5'):
        # Note: cannot find additional uncorrected files for the same probe if any corrected ones are already in the search path (move them out if needed)

        i_proc_probe = 0  # counter of processed probes
        i_proc_file = 0  # counter of processed files
        # pattern to identify only _probe_'s raw data files that need correction: '*INKL*{:0>2}*.[tT][xX][tT]'

        raw_parent = dir_incl / '_raw'  # raw_parent /=
        if cfg['in']['raw_subdir'] is None:
            cfg['in']['raw_subdir'] = ''

        dir_out = raw_parent / re.sub(r'[.\\/ *?]', '_',
                                      cfg['in']['raw_subdir'])

        # the sub replaces multilevel subdirs with the single level that correct_fun() can create

        def dt_from_utc_2000(probe):
            """ Correct time of probes started without time setting. Raw date must start from  2000-01-01T00:00"""
            return (
                datetime(year=2000, month=1, day=1) -
                cfg['in']['time_start_utc'][probe]
            ) if cfg['in']['time_start_utc'].get(probe) else timedelta(0)

        # convert cfg['in']['dt_from_utc'] keys to int

        cfg['in']['dt_from_utc'] = {
            int(p): v
            for p, v in cfg['in']['dt_from_utc'].items()
        }
        # convert cfg['in']['t_start_utc'] to cfg['in']['dt_from_utc'] and keys to int
        cfg['in']['dt_from_utc'].update(    # overwriting with the 'time_start_utc'-derived values where those already exist
            {int(p): dt_from_utc_2000(p) for p, v in cfg['in']['time_start_utc'].items()}
            )
        # make cfg['in']['dt_from_utc'][0] be default value
        cfg['in']['dt_from_utc'] = defaultdict(
            constant_factory(cfg['in']['dt_from_utc'].pop(0, timedelta(0))),
            cfg['in']['dt_from_utc'])

        for probe in probes:
            raw_found = []
            raw_pattern_file = str(
                Path(glob.escape(cfg['in']['raw_subdir'])) /
                cfg['in']['raw_pattern'].format(prefix=raw_root, number=probe))
            correct_fun = p_type[cfg['in']['probes_prefix']]['correct_fun']
            # if not archive:
            if (not re.match(r'.*(\.zip|\.rar)$', cfg['in']['raw_subdir'],
                             re.IGNORECASE)) and raw_parent.is_dir():
                raw_found = list(raw_parent.glob(raw_pattern_file))
            if not raw_found:
                # Check if already have corrected files for probe generated by correct_txt(). If so then just use them
                raw_found = list(
                    dir_out.glob(
                        f"{cfg['in']['probes_prefix']}{probe:0>2}.txt"))
                if raw_found:
                    print('corrected csv file', [r.name for r in raw_found],
                          'found')
                    correct_fun = lambda x, dir_out: x
                elif not cfg['in']['raw_subdir']:
                    continue

            for file_in in (raw_found or open_csv_or_archive_of_them(
                    raw_parent, binary_mode=False, pattern=raw_pattern_file)):
                file_in = correct_fun(file_in, dir_out=dir_out)
                if not file_in:
                    continue
                tbl = file_in.stem  # f"{cfg['in']['probes_prefix']}{probe:0>2}"
                # tbl = re.sub('^((?P<i>inkl)|w)_0', lambda m: 'incl' if m.group('i') else 'w',  # correct name
                #              re.sub('^[\d_]*|\*', '', file_in.stem).lower()),  # remove date-prefix if in name
                csv2h5(
                    [
                        str(
                            Path(__file__).parent / 'ini' /
                            f"csv_{'inclin' if probe_is_incl else 'wavegage'}_{p_type[cfg['in']['probes_prefix']]['format']}.ini"
                        ),
                        '--path',
                        str(file_in),
                        '--blocksize_int',
                        '50_000_000',  # 50Mbt
                        '--table',
                        tbl,
                        '--db_path',
                        str(db_path),
                        # '--log', str(scripts_path / 'log/csv2h5_inclin_Kondrashov.log'),
                        # '--b_raise_on_err', '0',  # ?
                        '--b_interact',
                        '0',
                        '--fs_float',
                        str(p_type[cfg['in']['probes_prefix']]
                            ['fs']),  #f'{fs(probe, file_in.stem)}',
                        '--dt_from_utc_seconds',
                        str(cfg['in']['dt_from_utc'][probe].total_seconds()),
                        '--b_del_temp_db',
                        '1',
                    ] +
                    (['--csv_specific_param_dict', 'invert_magnitometr: True']
                     if probe_is_incl else []),
                    **{
                        'filter': {
                            'min_date':
                            cfg['filter']['min_date'].get(
                                probe, np.datetime64(0, 'ns')),
                            'max_date':
                            cfg['filter']['max_date'].get(
                                probe, np.datetime64('now', 'ns')
                            ),  # simple 'now' works in synchronous mode
                        }
                    })

                # Get coefs:
                l.info(
                    f"Adding coefficients to {db_path}/{tbl} from {cfg['in']['db_coefs']}"
                )
                try:
                    h5copy_coef(cfg['in']['db_coefs'], db_path, tbl)
                except KeyError as e:  # Unable to open object (component not found)
                    l.warning(
                        'No coefs to copy?'
                    )  # write some dummy coefficients so that Veusz patterns can load:
                    h5copy_coef(None,
                                db_path,
                                tbl,
                                dict_matrices=dict_matrices_for_h5(tbl=tbl))
                except OSError as e:
                    l.warning(
                        'Not found DB with coefs?'
                    )  # write some dummy coefficients so that Veusz patterns can load:
                    h5copy_coef(None,
                                db_path,
                                tbl,
                                dict_matrices=dict_matrices_for_h5(tbl=tbl))
                i_proc_file += 1
            else:
                # note: this `else` belongs to the `for` and, since the loop has no `break`, it always runs
                print('no', raw_pattern_file, end=', ')
            i_proc_probe += 1
        print('Ok:', i_proc_probe, 'probes,', i_proc_file, 'files processed.')

    if st(2, 'Calculate physical parameters and average'):
        kwarg = {
            'in': {
                'min_date': cfg['filter']['min_date'][0],
                'max_date': cfg['filter']['max_date'][0],
                'time_range_zeroing': cfg['in']['time_range_zeroing']
            },
            'proc': {}
        }
        # if aggregate_period_s is None then do not average and write to *_proc_noAvg.h5, else load from that h5 and write to _proc.h5
        if not cfg['out']['aggregate_period_s']:
            cfg['out']['aggregate_period_s'] = [
                None, 2, 600, 7200 if probe_is_incl else 3600
            ]

        if cfg['in']['azimuth_add']:
            if 'Lat' in cfg['in']['azimuth_add']:
                # add magnetic declination,° for used coordinates
                # todo: get time
                kwarg['proc']['azimuth_add'] = mag_dec(
                    cfg['in']['azimuth_add']['Lat'],
                    cfg['in']['azimuth_add']['Lon'],
                    datetime(2020, 9, 10),
                    depth=-1)
            else:
                kwarg['proc']['azimuth_add'] = 0
            if 'constant' in cfg['in']['azimuth_add']:
                # and add a constant. For example, subtract declination at the calibration place if it was applied
                kwarg['proc']['azimuth_add'] += cfg['in']['azimuth_add'][
                    'constant']  # add -6.656 to account for calibration in Kaliningrad (mag deg = 6.656°)

        for aggregate_period_s in cfg['out']['aggregate_period_s']:
            if aggregate_period_s is None:
                db_path_in = db_path
                db_path_out = dir_incl / f'{db_path.stem}_proc_noAvg.h5'
            else:
                db_path_in = dir_incl / f'{db_path.stem}_proc_noAvg.h5'
                db_path_out = dir_incl / f'{db_path.stem}_proc.h5'  # or separately: '_proc{aggregate_period_s}.h5'

            # 'incl.*|w\d*'  inclinometers or wavegauges w\d\d # 'incl09':
            tables_list_regex = f"{cfg['in']['probes_prefix'].replace('voln', 'w')}.*"
            if cfg['in']['probes']:
                tables_list_regex += "(?:{})".format('|'.join(
                    '{:0>2}'.format(p) for p in cfg['in']['probes']))

            args = [
                '../../empty.yml',  # all settings are here, so to not print 'using default configuration' we use some existing empty file
                '--db_path',
                str(db_path_in),
                '--tables_list',
                tables_list_regex,
                '--aggregate_period',
                f'{aggregate_period_s}S' if aggregate_period_s else '',
                '--out.db_path',
                str(db_path_out),
                '--table',
                f'V_incl_bin{aggregate_period_s}'
                if aggregate_period_s else 'V_incl',
                '--verbose',
                'INFO',  #'DEBUG' get many numba messages
                '--b_del_temp_db',
                '1',
                # '--calc_version', 'polynom(force)',  # deprecated
                # '--chunksize', '20000',
                # '--not_joined_h5_path', f'{db_path.stem}_proc.h5',
            ]

            if aggregate_period_s is None:  # processing parameters (if we have saved processed data, then when aggregating we are not processing)
                # Note: for Baranov's prog 4096 is not suited:
                args += ([
                    '--max_dict',
                    'M[xyz]:4096',
                    # '--time_range_zeroing_dict', "incl19: '2019-11-10T13:00:00', '2019-11-10T14:00:00'\n,"  # not works - use kwarg
                    # '--time_range_zeroing_list', '2019-08-26T04:00:00, 2019-08-26T05:00:00'
                    '--split_period',
                    '1D'
                ] if probe_is_incl else [
                    '--bad_p_at_bursts_starts_peroiod',
                    '1H',
                ])
                # csv split by 1 day (default for no avg), else csv is monolithic
            if aggregate_period_s not in cfg['out'][
                    'aggregate_period_s_not_to_text']:  # , 300, 600]:
                args += ['--text_path', str(dir_incl / 'text_output')]
            # If need all data to be combined one after one:
            # set_field_if_no(kwarg, 'in', {})
            # kwarg['in'].update({
            #
            #         'tables': [f'incl{i:0>2}' for i in min_date.keys() if i!=0],
            #         'dates_min': min_date.values(),  # in table list order
            #         'dates_max': max_date.values(),  #
            #         })
            # set_field_if_no(kwarg, 'out', {})
            # kwarg['out'].update({'b_all_to_one_col': 'True'})

            incl_h5clc.main(args, **kwarg)

    if st(3, 'Calculate spectrograms'):  # Can be done at any time after step 1
        min_Pressure = 7

        # add dict dates_min like {probe: parameter} of incl_clc to can specify param to each probe
        def raise_ni():
            raise NotImplementedError(
                'Can not proc probes having different fs in one run: you need to do it separately'
            )

        args = [
            Path(incl_h5clc.__file__).with_name(
                f'incl_h5spectrum{db_path.stem}.yaml'),
            # if no such file all settings are here
            '--db_path',
            str(dir_incl / f'{db_path.stem}_proc_noAvg.h5'),
            '--tables_list',
            f"{cfg['in']['probes_prefix']}.*",  # inclinometers or wavegauges w\d\d  ## 'w02', 'incl.*',
            # '--aggregate_period', f'{aggregate_period_s}S' if aggregate_period_s else '',
            '--min_date',
            datetime64_str(cfg['filter']['min_date'][0]),
            '--max_date',
            datetime64_str(cfg['filter']['max_date']
                           [0]),  # '2019-09-09T16:31:00',  #17:00:00
            '--min_Pressure',
            f'{min_Pressure}',
            # '--max_dict', 'M[xyz]:4096',  # use if db_path is not ends with _proc_noAvg.h5 i.e. need calc velocity
            '--out.db_path',
            f"{db_path.stem.replace('incl', cfg['in']['probes_prefix'])}_proc_psd.h5",
            # '--table', f'psd{aggregate_period_s}' if aggregate_period_s else 'psd',
            '--fs_float',
            str(p_type[cfg['in']['probes_prefix']]
                ['fs']),  # f"{fs(probes[0], cfg['in']['probes_prefix'])}",
            # (lambda x: x == x[0])(np.vectorize(fs)(probes, prefix))).all() else raise_ni()
            #
            # '--time_range_zeroing_list', '2019-08-26T04:00:00, 2019-08-26T05:00:00'
            # '--verbose', 'DEBUG',
            # '--chunksize', '20000',
            '--b_interact',
            '0',
        ]
        if probe_is_incl:
            args += [
                '--split_period',
                '2H',
                '--fmin',
                '0.0004',  #0.0004
                '--fmax',
                '1.05'
            ]
        else:
            args += [
                '--split_period',
                '1H',
                '--dt_interval_minutes',
                '15',  # set this if burst mode to the burst interval
                '--fmin',
                '0.0001',
                '--fmax',
                '4',
                #'--min_Pressure', '-1e15',  # to not load NaNs
            ]

        incl_h5spectrum.main(args)

    if st(4, 'Draw in Veusz'):
        pattern_path = dir_incl / r'processed_h5,vsz/201202-210326incl_proc#28.vsz'
        # r'\201202_1445incl_proc#03_pattern.vsz'  #'
        # db_path.parent / r'vsz_5min\191119_0000_5m_incl19.vsz'  # r'vsz_5min\191126_0000_5m_w02.vsz'

        b_images_only = False
        # importing in vsz index slices replacing:
        pattern_str_slice_old = None

        # Length of non-adjacent intervals, s (set None to disallow)
        # pandas interval in string or tuple representation: '1D' - period between intervals, plus the interval to draw
        period_str = '0s'  # '1D'  #  dt
        dt_str = '0s'  # '5m'
        file_intervals = None
        edges_dict = None  # set only in the per-probe branch below; checked later

        period = to_offset(period_str).delta
        dt = to_offset(dt_str).delta  # timedelta(0)  #  60 * 5

        if file_intervals and period and dt:

            # Load starts and assign ends
            t_intervals_start = pd.read_csv(
                cfg['in']['path_cruise'] /
                r'vsz+h5_proc\intervals_selected.txt',
                converters={
                    'time_start': lambda x: np.datetime64(x, 'ns')
                },
                index_col=0).index
            edges = (pd.DatetimeIndex(t_intervals_start),
                     pd.DatetimeIndex(t_intervals_start + dt_custom_s)
                     )  # np.zeros_like(); note: dt_custom_s (interval length, s) must be defined elsewhere
        elif period and dt:
            # Generate periodic intervals
            t_interval_start, t_intervals_end = intervals_from_period(
                datetime_range=np.array(
                    [
                        cfg['filter']['min_date']['0'],
                        cfg['filter']['max_date']['0']
                    ],
                    # ['2018-08-11T18:00:00', '2018-09-06T00:00:00'],
                    # ['2019-02-11T13:05:00', '2019-03-07T11:30:00'],
                    # ['2018-11-16T15:19', '2018-12-14T14:35'],
                    # ['2018-10-22T12:30', '2018-10-27T06:30:00'],
                    'datetime64[s]'),
                period=period)
            edges = (pd.DatetimeIndex([t_interval_start
                                       ]).append(t_intervals_end[:-1]),
                     pd.DatetimeIndex(t_intervals_end))
        else:  # [min, max] edges for each probe
            edges_dict = {
                pr:
                [cfg['filter']['min_date'][pr], cfg['filter']['max_date'][pr]]
                for pr in probes
            }

        cfg_vp = {'veusze': None}
        for i, probe in enumerate(probes):
            # cfg_vp = {'veusze': None}
            if edges_dict:  # custom edges for each probe
                edges = [pd.DatetimeIndex([t]) for t in edges_dict[probe]]

            # substring in file to replace probe_name_in_pattern (see below).
            probe_name = f"_{cfg['in']['probes_prefix'].replace('incl', 'i')}{probe:02}"
            tbl = None  # f"/{cfg['in']['probes_prefix']}{probe:02}"  # to check probe data exist in db else will not check
            l.info('Draw %s in Veusz: %d intervals...', probe_name,
                   edges[0].size)
            # for i_interval, (t_interval_start, t_interval_end) in enumerate(zip(pd.DatetimeIndex([t_interval_start]).append(t_intervals_end[:-1]), t_intervals_end), start=1):

            for i_interval, (t_interval_start,
                             t_interval_end) in enumerate(zip(*edges),
                                                          start=1):

                # if i_interval < 23: #<= 0:  # TEMPORARY Skip this number of intervals
                #     continue
                if period and period != dt:
                    t_interval_start = t_interval_end - pd.Timedelta(
                        dt_custom_s, 's')

                if tbl:
                    try:  # skipping absent probes
                        start_end = h5q_interval2coord(
                            db_path=str(db_path),
                            table=tbl,
                            t_interval=(t_interval_start, t_interval_end))
                        if not len(start_end):
                            break  # no data
                    except KeyError:
                        break  # device name not in specified range, go to next name

                pattern_path_new = pattern_path.with_name(''.join([
                    f'{t_interval_start:%y%m%d_%H%M}',
                    f'_{dt_str}' if dt else '', f'{probe_name}.vsz'
                ]))

                # Modify pattern file
                if not b_images_only:
                    pattern_type, pattern_number = re.match(
                        r'.*(incl|w)_proc?#?(\d*).*',
                        pattern_path.name).groups()
                    probe_name_in_pattern = f"_{pattern_type.replace('incl', 'i')}{pattern_number}"

                    def f_replace(line):
                        """
                        Replace in file
                        1. probe name
                        2. slice
                        """
                        # if i_interval == 1:
                        line, ok = re.subn(probe_name_in_pattern, probe_name,
                                           line)
                        if ok and pattern_str_slice_old:  # can be only in same line
                            str_slice = '(({:d}, {:d}, None),)'.format(
                                *(start_end +
                                  np.int32([-1, 1])))  # bytes(, 'ascii')
                            line = re.sub(pattern_str_slice_old, str_slice,
                                          line)
                        return line

                    if not rep_in_file(pattern_path,
                                       pattern_path_new,
                                       f_replace=f_replace,
                                       binary_mode=False):
                        l.warning('Veusz pattern not changed!'
                                  )  # may be ok if we need to draw the pattern
                        # break
                    elif cfg_vp['veusze']:
                        cfg_vp['veusze'].Load(str(pattern_path_new))
                elif cfg_vp['veusze']:
                    cfg_vp['veusze'].Load(str(pattern_path_new))

                txt_time_range = \
                    """
                    "[['{:%Y-%m-%dT%H:%M}', '{:%Y-%m-%dT%H:%M}']]" \
                    """.format(t_interval_start, t_interval_end)
                print(f'{i_interval}. {txt_time_range}', end=' ')

                cfg_vp = veuszPropagate.main(
                    [
                        Path(veuszPropagate.__file__).parent.with_name(
                            'veuszPropagate.ini'),
                        # '--data_yield_prefix', '-',

                        # '--path', str(db_path),  # if custom loading from db and some source is required
                        '--tables_list',
                        '',  # switches to search vsz-files only # f'/{probe_name}',  # 181022inclinometers/ \d*
                        '--pattern_path',
                        str(pattern_path_new),
                        # fr'd:\workData\BalticSea\190801inclinometer_Schuka\{probe_name}_190807_1D.vsz',
                        # str(dir_incl / f'{probe_name}_190211.vsz'), #warning: create file with small name
                        # '--before_next', 'restore_config',
                        # '--add_to_filename', f"_{t_interval_start:%y%m%d_%H%M}_{dt}",
                        '--filename_fun',
                        f'lambda tbl: "{pattern_path_new.name}"',
                        '--add_custom_list',
                        f'USEtime__',  # f'USEtime{probe_name}', nAveragePrefer',
                        '--add_custom_expressions_list',
                        txt_time_range,
                        # + """
                        # ", 5"
                        # """,
                        '--b_update_existed',
                        'True',
                        '--export_pages_int_list',
                        '0',  # 0 for all '6, 7, 8',  #'1, 2, 3'
                        # '--export_dpi_int', '200',
                        '--export_format',
                        'jpg',  #'emf',
                        '--b_interact',
                        '0',
                        '--b_images_only',
                        f'{b_images_only}',
                        '--return',
                        '<embedded_object>',  # reuse to not bloat memory
                        '--b_execute_vsz',
                        'True',
                        '--before_next',
                        'Close()'  # Close() is needed if b_execute_vsz with many files
                    ],
                    veusze=cfg_vp['veusze'])

    if st(40, f'Draw in Veusz by loader-drawer.vsz method'):
        # save all vsz files that uses separate code

        from os import chdir as os_chdir
        dt_s = 300
        cfg['in'][
            'pattern_path'] = db_path.parent / f'vsz_{dt_s:d}s' / '~pattern~.vsz'

        time_starts = pd.read_csv(
            db_path.parent / r'processed_h5,vsz' / 'intervals_selected.txt',
            index_col=0,
            parse_dates=True,
            date_parser=lambda x: pd.to_datetime(x, format='%Y-%m-%dT%H:%M:%S'
                                                 )).index

        pattern_code = cfg['in']['pattern_path'].read_bytes(
        )  # encoding='utf-8'
        path_vsz_all = []
        for i, probe in enumerate(probes):
            probe_name = f"{cfg['in']['probes_prefix']}{probe:02}"  # table name in db
            l.info('Draw %s in Veusz: %d intervals...', probe_name,
                   time_starts.size)
            for i_interval, time_start in enumerate(time_starts, start=1):
                path_vsz = cfg['in']['pattern_path'].with_name(
                    f"{time_start:%y%m%d_%H%M}_{probe_name.replace('incl','i')}.vsz"
                )
                # copy the pattern file to path_vsz (unlike Example #8, no first-row substitution is done here)
                path_vsz.write_bytes(pattern_code)
                path_vsz_all.append(path_vsz)

        os_chdir(cfg['in']['pattern_path'].parent)
        veuszPropagate.main(
            [
                'ini/veuszPropagate.ini',
                '--path',
                str(cfg['in']['pattern_path'].with_name(
                    '??????_????_*.vsz')),  # db_path),
                '--pattern_path',
                f"{cfg['in']['pattern_path']}_",
                # used here only to auto-detect the export dir; need not be an existing file path if ['out']['paths'] is provided
                # '--table_log', f'/{device}/logRuns',
                # '--add_custom_list', f'{device_veusz_prefix}USE_time_search_runs',  # 'i3_USE_timeRange',
                # '--add_custom_expressions',
                # """'[["{log_row[Index]:%Y-%m-%dT%H:%M:%S}", "{log_row[DateEnd]:%Y-%m-%dT%H:%M:%S}"]]'""",
                # '--export_pages_int_list', '1', #'--b_images_only', 'True'
                '--b_interact',
                '0',
                '--b_update_existed',
                'True',  # todo: delete_overlapped
                '--b_images_only',
                'True',
                '--load_timeout_s_float',
                str(cfg['program']['load_timeout_s'])
                # '--min_time', '2020-07-08T03:35:00',
            ],
            **{'out': {
                'paths': path_vsz_all
            }})

    if st(50, 'Export from existed Veusz files in dir'):
        pattern_parent = db_path.parent  # r'vsz_5min\191126_0000_5m_w02.vsz'
        pattern_path = str(pattern_parent / r'processed_h5,vsz' /
                           '??????incl_proc#[1-9][0-9].vsz')  # [0-2,6-9]
        veuszPropagate.main([
            'ini/veuszPropagate.ini',
            '--path',
            pattern_path,
            '--pattern_path',
            pattern_path,
            # '--export_pages_int_list', '1', #'--b_images_only', 'True'
            '--b_interact',
            '0',
            '--b_update_existed',
            'True',  # todo: delete_overlapped
            '--b_images_only',
            'True',
            '--load_timeout_s_float',
            str(cfg['program']['load_timeout_s']),
            '--b_execute_vsz',
            'True',
            '--before_next',
            'Close()'  # Close() is needed if b_execute_vsz with many files
        ])
Example #12
from os import chdir as os_chdir
from os import system as os_system
from time import sleep as delay
from uuid import getnode as get_mac

macs = {'Macbook':'105773427819682',
        'MacBookPro':'117637351435',
        'MBP2':'220083054034723'}

this_mac = get_mac()
if str(this_mac)==macs['Macbook']:   #   MB
    os_chdir('/Users/sethchase/Dropbox/BD_Scripts/django/Dropbox/aprinto')
elif str(this_mac)==macs['MacBookPro']:   #   MBP
    os_chdir('/Users/admin/django/Dropbox/aprinto')

print('\n\tRe-Building Content and Testing...\n')
os_system('ENV/bin/python manage.py reset_db --noinput')
os_system('ENV/bin/python manage.py syncdb --noinput')

print('\n\tStarting Development Server ...')
os_system('ENV/bin/python manage.py runserver 0.0.0.0:8080 &')
delay(6)

# os_system('ENV/bin/python management/initialize.py')
os_system('ENV/bin/python management/tests.py "dev"')                         # Run Tests

os_system('pkill -f "python manage.py runserver"')

Example #13
def chdir(path):
    from os import chdir as os_chdir
    print('entering directory: ' + path)
    os_chdir(path)
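Unlike the context-manager variants in Examples #2 and #5, this helper changes the directory permanently; a caller that wants to return must save getcwd() first:

import tempfile
from os import getcwd

previous = getcwd()
chdir(tempfile.gettempdir())  # prints "entering directory: ..." and switches there
chdir(previous)               # switch back explicitly; nothing restores it automatically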
Example #14
#Cache Compatibility Layer's Code
import py_compile
import zipfile
from os import chdir as os_chdir, remove as os_remove, rename as os_rename

# `compatiblity` (an iterable of source lines) is assumed to be defined earlier
g = open('cache/build.ytmp', mode='w')
#g = open('cache/build.py', mode='w')
for j in compatiblity:
    g.write(j)
g.close()

#Convert to Byte Code
name = 'example-compiled.pyc.yt'
try:
    os_remove('cache/__pycache__/' + name)
except FileNotFoundError:
    pass
py_compile.compile('cache/build.ytmp')
os_rename('cache/__pycache__/build.cpython-37.pyc',  # bytecode cache name is hard-coded for CPython 3.7
          str('cache/__pycache__/' + name))

package_name = 'example-package.zip'
#Zip file creation
os_chdir('cache/__pycache__/')
package = zipfile.ZipFile(package_name, mode='w')
package.write(name)
package.close()
os_chdir('..')
os_chdir('..')
os_rename('cache/__pycache__/' + package_name, str('output/' + package_name))

#Wait
print("Press any key to close...")
a = input()
Example #15
def _main(
    inventory,
    operations,
    verbosity,
    chdir,
    ssh_user,
    ssh_port,
    ssh_key,
    ssh_key_password,
    ssh_password,
    winrm_username,
    winrm_password,
    winrm_port,
    winrm_transport,
    shell_executable,
    sudo,
    sudo_user,
    use_sudo_password,
    su_user,
    parallel,
    fail_percent,
    data,
    group_data,
    config_filename,
    dry,
    limit,
    no_wait,
    serial,
    quiet,
    debug,
    debug_facts,
    debug_operations,
    support=None,
):
    # Setup working directory
    #

    if chdir:
        os_chdir(chdir)

    # Setup logging
    #

    if not debug and not sys.warnoptions:
        warnings.simplefilter("ignore")

    log_level = logging.INFO
    if debug:
        log_level = logging.DEBUG
    elif quiet:
        log_level = logging.WARNING

    setup_logging(log_level)

    # Bootstrap any virtualenv
    init_virtualenv()

    #  Check operations are valid and setup command
    #

    # Make a copy before we overwrite
    original_operations = operations

    # Debug (print) inventory + group data
    if operations[0] == "debug-inventory":
        command = "debug-inventory"

    # Get one or more facts
    elif operations[0] == "fact":
        command = "fact"
        operations = get_facts_and_args(operations[1:])

    # Execute a raw command with server.shell
    elif operations[0] == "exec":
        command = "exec"
        operations = operations[1:]

    # Execute one or more deploy files
    elif all(cmd.endswith(".py") for cmd in operations):
        command = "deploy"

        filenames = []

        for filename in operations:
            if path.exists(filename):
                filenames.append(filename)
                continue
            if chdir and filename.startswith(chdir):
                correct_filename = path.relpath(filename, chdir)
                logger.warning(
                    (
                        "Fixing deploy filename under `--chdir` argument: "
                        f"{filename} -> {correct_filename}"
                    ),
                )
                filenames.append(correct_filename)
                continue
            raise CliError(
                "No deploy file: {0}".format(
                    path.join(chdir, filename) if chdir else filename,
                ),
            )

        operations = filenames

    # Operation w/optional args (<module>.<op> ARG1 ARG2 ...)
    elif len(operations[0].split(".")) == 2:
        command = "op"
        operations = get_operation_and_args(operations)

    else:
        raise CliError(
            """Invalid operations: {0}

    Operation usage:
    pyinfra INVENTORY deploy_web.py [deploy_db.py]...
    pyinfra INVENTORY server.user pyinfra home=/home/pyinfra
    pyinfra INVENTORY exec -- echo "hello world"
    pyinfra INVENTORY fact os [users]...""".format(
                operations,
            ),
        )

    # Setup state, config & inventory
    #

    cwd = getcwd()
    if cwd not in sys.path:  # ensure cwd is present in sys.path
        sys.path.append(cwd)

    state = State()
    state.cwd = cwd
    ctx_state.set(state)

    if verbosity > 0:
        state.print_fact_info = True
        state.print_noop_info = True

    if verbosity > 1:
        state.print_input = state.print_fact_input = True

    if verbosity > 2:
        state.print_output = state.print_fact_output = True

    if not quiet:
        click.echo("--> Loading config...", err=True)

    config = Config()
    ctx_config.set(config)

    # Load up any config.py from the filesystem
    config_filename = path.join(state.cwd, config_filename)
    if path.exists(config_filename):
        exec_file(config_filename)

    # Lock the current config, this allows us to restore this version after
    # executing deploy files that may alter them.
    config.lock_current_state()

    # Arg based config overrides
    if sudo:
        config.SUDO = True
        if sudo_user:
            config.SUDO_USER = sudo_user

    if use_sudo_password:
        config.USE_SUDO_PASSWORD = use_sudo_password

    if su_user:
        config.SU_USER = su_user

    if parallel:
        config.PARALLEL = parallel

    if shell_executable:
        config.SHELL = None if shell_executable in ("None", "null") else shell_executable

    if fail_percent is not None:
        config.FAIL_PERCENT = fail_percent

    if not quiet:
        click.echo("--> Loading inventory...", err=True)

    override_data = {}

    for arg in data:
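        # Each --data argument arrives as a raw KEY=VALUE string; split on the
        # first '=' only, so values may themselves contain '='.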
        key, value = arg.split("=", 1)
        override_data[key] = value

    override_data = {key: parse_cli_arg(value) for key, value in override_data.items()}

    for key, value in (
        ("ssh_user", ssh_user),
        ("ssh_key", ssh_key),
        ("ssh_key_password", ssh_key_password),
        ("ssh_port", ssh_port),
        ("ssh_password", ssh_password),
        ("winrm_username", winrm_username),
        ("winrm_password", winrm_password),
        ("winrm_port", winrm_port),
        ("winrm_transport", winrm_transport),
    ):
        if value:
            override_data[key] = value

    # Load up the inventory from the filesystem
    inventory, inventory_group = make_inventory(
        inventory,
        cwd=state.cwd,
        override_data=override_data,
        group_data_directories=group_data,
    )
    ctx_inventory.set(inventory)

    # Now that we have inventory, apply --limit config override
    initial_limit = None
    if limit:
        all_limit_hosts = []

        for limiter in limit:
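            # Try the limiter as an inventory group name first, falling back
            # to fnmatch glob matching against individual host names.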
            try:
                limit_hosts = inventory.get_group(limiter)
            except NoGroupError:
                limit_hosts = [host for host in inventory if fnmatch(host.name, limiter)]

            if not limit_hosts:
                logger.warning("No host matches found for --limit pattern: {0}".format(limiter))

            all_limit_hosts.extend(limit_hosts)
        initial_limit = list(set(all_limit_hosts))

    # Initialise the state
    state.init(inventory, config, initial_limit=initial_limit)

    if command == "debug-inventory":
        print_inventory(state)
        _exit()

    # Connect to the hosts & start handling the user commands
    #

    if not quiet:
        click.echo(err=True)
        click.echo("--> Connecting to hosts...", err=True)

    connect_all(state)

    if command == "fact":
        if not quiet:
            click.echo(err=True)
            click.echo("--> Gathering facts...", err=True)

        state.print_fact_info = True
        fact_data = {}

        for i, command in enumerate(operations):
            fact_cls, args, kwargs = command
            fact_key = fact_cls.name

            if args or kwargs:
                fact_key = "{0}{1}{2}".format(
                    fact_cls.name,
                    args or "",
                    " ({0})".format(get_kwargs_str(kwargs)) if kwargs else "",
                )

            try:
                fact_data[fact_key] = get_facts(
                    state,
                    fact_cls,
                    args=args,
                    kwargs=kwargs,
                    apply_failed_hosts=False,
                )
            except PyinfraError:
                pass

        print_facts(fact_data)
        _exit()

    if command == "exec":
        state.print_output = True
        add_op(
            state,
            server.shell,
            " ".join(operations),
            _allow_cli_mode=True,
        )

    elif command == "deploy":
        if not quiet:
            click.echo(err=True)
            click.echo("--> Preparing operations...", err=True)

        # Number of "steps" to make = number of files * number of hosts
        for i, filename in enumerate(operations):
            logger.info("Loading: {0}".format(click.style(filename, bold=True)))

            state.current_op_file_number = i
            load_deploy_file(state, filename)

            # Remove any config changes introduced by the deploy file & any includes
            config.reset_locked_state()

    elif command == "op":
        if not quiet:
            click.echo(err=True)
            click.echo("--> Preparing operation...", err=True)

        op, args = operations
        args, kwargs = args
        kwargs["_allow_cli_mode"] = True

        def print_host_ready(host):
            logger.info(
                "{0}{1} {2}".format(
                    host.print_prefix,
                    click.style("Ready:", "green"),
                    click.style(original_operations[0], bold=True),
                ),
            )

        kwargs["_after_host_callback"] = print_host_ready

        add_op(state, op, *args, **kwargs)

    # Print proposed changes, execute unless --dry, and exit
    #

    if not quiet:
        click.echo(err=True)
        click.echo("--> Proposed changes:", err=True)
    print_meta(state)

    # If --debug-facts or --debug-operations, print and exit
    if debug_facts or debug_operations:
        if debug_facts:
            print_state_facts(state)

        if debug_operations:
            print_state_operations(state)

        _exit()

    if dry:
        _exit()

    if not quiet:
        click.echo(err=True)
        click.echo("--> Beginning operation run...", err=True)
    run_ops(state, serial=serial, no_wait=no_wait)

    if not quiet:
        click.echo("--> Results:", err=True)
    print_results(state)

    _exit()
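
For reference, the --limit fallback above matches host names with fnmatch-style globbing. A minimal standalone sketch of that matching step (the host names are hypothetical):

from fnmatch import fnmatch

hosts = ['web-01', 'web-02', 'db-01']   # hypothetical inventory
limiter = 'web-*'

limit_hosts = [h for h in hosts if fnmatch(h, limiter)]
print(limit_hosts)  # -> ['web-01', 'web-02']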