Example #1
    external_stylesheets=theme.external_stylesheets,
    meta_tags=[{
        "name": "viewport",
        "content": "width=device-width, initial-scale=1"
    }],
)
app.logger.setLevel(logging.WARNING)
server = app.server

app.title = "Reek Webviz Demonstration"
app.config.suppress_callback_exceptions = True

app.webviz_settings = {
    "shared_settings": webviz_config.SHARED_SETTINGS_SUBSCRIPTIONS.transformed_settings(
        {}, PosixPath('/home/anders/git/webviz-config/examples'), True),
    "portable": True,
    "theme": theme,
}

CACHE.init_app(server)

theme.adjust_csp({"script-src": app.csp_hashes()}, append=True)
Talisman(server,
         content_security_policy=theme.csp,
         feature_policy=theme.feature_policy)

WEBVIZ_STORAGE.use_storage = True
WEBVIZ_STORAGE.storage_folder = Path(
Example #2
        Fixture.GENERIC_SERIALIZABLE_LIST_INT,
        [3, 4, 5],
    ),
    (
        GenericSerializableList[str],
        Fixture.GENERIC_SERIALIZABLE_LIST_STR,
        ["_a", "_b", "_c"],
    ),
    (MyDatetimeNewType, Fixture.DATETIME, Fixture.DATETIME_STR),
]

if os.name == "posix":
    inner_values.extend([
        (Path, Path("/a/b/c"), "/a/b/c"),
        (PurePath, PurePath("/a/b/c"), "/a/b/c"),
        (PosixPath, PosixPath("/a/b/c"), "/a/b/c"),
        (PurePosixPath, PurePosixPath("/a/b/c"), "/a/b/c"),
        (os.PathLike, PurePosixPath("/a/b/c"), "/a/b/c"),
    ])
else:
    inner_values.extend([
        (Path, Path("/a/b/c"), "\\a\\b\\c"),
        (PurePath, PurePath("/a/b/c"), "\\a\\b\\c"),
        (WindowsPath, WindowsPath("C:/Windows"), "C:\\Windows"),
        (
            PureWindowsPath,
            PureWindowsPath("C:/Program Files"),
            "C:\\Program Files",
        ),
        (os.PathLike, PureWindowsPath("/a/b/c"), "\\a\\b\\c"),
    ])
Example #3

# Gather variables that will be shared across functions
with flywheel.GearContext() as context:
    # Setup basic logging
    context.init_logging()
    # Log the configuration for this job
    # context.log_config()
    config = context.config
    builtin_recon = config.get('recon_builtin')
    recon_spec = builtin_recon if builtin_recon else \
        context.get_input_path('recon_spec')
    ignore = config.get('ignore', '').split()
    output_space = config.get('output_space', '').split()
    analysis_id = context.destination['id']
    gear_output_dir = PosixPath(context.output_dir)
    qsiprep_script = gear_output_dir / "qsiprep_run.sh"
    output_root = gear_output_dir / analysis_id
    working_dir = PosixPath(str(output_root.resolve()) + "_work")
    bids_dir = output_root
    bids_root = bids_dir / 'bids_dataset'
    # Get relevant container objects
    fw = flywheel.Client(context.get_input('api_key')['key'])
    analysis_container = fw.get(analysis_id)
    project_container = fw.get(analysis_container.parents['project'])
    session_container = fw.get(analysis_container.parent['id'])
    subject_container = fw.get(session_container.parents['subject'])

    project_label = project_container.label
    extra_t1 = context.get_input('t1_anatomy')
    extra_t1_path = None if extra_t1 is None else \
Example #4
def test_adding_corpus(mock):
    mock.return_value = PosixPath('blah')
    mock.exists().return_value = False
    with pytest.raises(FileExistsError,
                       match="The path of the file is invalid"):
        WatsonSTT(url=url).add_corpus("blah")
Example #5
 def __init__(self, training_output: os.PathLike) -> None:
     """Class constructor."""
     training_path = PosixPath(training_output)
     self._model = joblib.load(training_path / "model.joblib.gz")
     self.parse_summary_json(training_path / "summary.json")
Example #6
    def to_pytables(self,
                    file_name: str,
                    to_hdf_kw: Optional[Dict[str, Any]] = None) -> None:
        """Write dataset to disk as a pytables h5 file

        This method saves a file using a strict twaml-compatible
        naming scheme. An existing dataset label **is not
        stored**. The properties of the class that are serialized to
        disk (and the associated key for each item):

        - ``df`` as ``{name}_payload``
        - ``weights`` as ``{name}_{weight_name}``
        - ``auxweights`` as ``{name}_auxweights``
        - ``wtloop_metas`` as ``{name}_wtloop_metas``

        These properties are wrapped in a pandas DataFrame (if they
        are not already) to be stored in a .h5 file. The
        :meth:`from_pytables` is designed to read in this output; so
        the standard use case is to call this function to store a
        dataset that was initialized via :meth:`from_root`.

        Internally this function uses :meth:`pandas.DataFrame.to_hdf`
        on a number of structures.

        Parameters
        ----------
        file_name:
          output file name
        to_hdf_kw:
          dict of keyword arguments fed to :meth:`pandas.DataFrame.to_hdf`

        Examples
        --------

        >>> ds = twaml.dataset.from_root("file.root", name="myds",
        ...                              detect_weights=True, wtloop_metas=True)
        >>> ds.to_pytables("output.h5")
        >>> ds_again = twaml.dataset.from_pytables("output.h5")
        >>> ds_again.name
        'myds'

        """
        if to_hdf_kw is None:
            to_hdf_kw = {}
        log.info(
            f"Creating pytables dataset with name '{self.name}' in {file_name}"
        )
        log.info(f"  selection used: '{self.selection_formula}'")
        log.info(f"  original source was:")
        for fname in self.files:
            log.info(f"   - {fname}")
        if PosixPath(file_name).exists():
            log.warning(f"{file_name} exists, overwriting")
        weights_frame = pd.DataFrame(dict(weights=self._weights))
        self._df.to_hdf(file_name,
                        f"{self.name}_payload",
                        mode="w",
                        **to_hdf_kw)
        weights_frame.to_hdf(file_name,
                             f"{self.name}_{self.weight_name}",
                             mode="a")
        if self._auxweights is not None:
            self._auxweights.to_hdf(file_name,
                                    f"{self.name}_auxweights",
                                    mode="a")
        if self.wtloop_metas is not None:
            tempdict = {
                k: np.array([str(v)])
                for k, v in self.wtloop_metas.items()
            }
            wtmetadf = pd.DataFrame.from_dict(tempdict)
            wtmetadf.to_hdf(file_name, f"{self.name}_wtloop_metas", mode="a")
Example #7
    def dec(f):
        func = partial(f, *args, **kwargs)
        MODULE_CONFIG['additional_loaders'].append(func)
        return f

    return dec


# Config path load order - not to be edited manually!
_SEARCH_DIRS = []
_FILENAMES = []
_FILEPATHS = []

# Search extra directories first
_SEARCH_DIRS += MODULE_CONFIG['extra_search_dirs']
_SEARCH_DIRS = [PosixPath(p) for p in _SEARCH_DIRS]

if MODULE_CONFIG['conf_path_from_env']:
    path = os.environ.get('%s_CONF_PATH' % pkg_name.upper())
    if path:
        path = PosixPath(path).resolve()
        if path.is_dir():  # If we have a directory
            _SEARCH_DIRS.append(path)  # Add it to _SEARCH_DIRS
        else:  # Otherwise add it to explicit filenames
            _FILEPATHS.append(path)

if MODULE_CONFIG['load_from_pwd']:
    _SEARCH_DIRS.append(PosixPath('.'))

if MODULE_CONFIG['load_from_home']:
    _SEARCH_DIRS.append(PosixPath(os.path.expanduser('~')))
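
# Illustrative search order (hypothetical values, assuming pkg_name == 'mypkg'):
# with extra_search_dirs=['/etc/mypkg'], MYPKG_CONF_PATH pointing at the directory
# /opt/mypkg, and load_from_pwd/load_from_home enabled, _SEARCH_DIRS ends up as
#   [PosixPath('/etc/mypkg'), PosixPath('/opt/mypkg'), PosixPath('.'), PosixPath('/home/user')]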
Example #8
        'active': {
            'type': 'str',
            'in': False,
            'required': True
            },
        'passive': {
            'type': 'str',
            'in': False,
            'required': True
            }
        }
    }

# PID information
PID = getpid()
PID_FILE = PosixPath('/var/run/oniond.pid')


# +-----------------------------------------------------------------+
# | HELP                                                            |
# +-----------------------------------------------------------------+
build = f'{__version__} (build {__build__})'

ABOUT = f'''
             o+              +-------------------------------------------+
             /d/             | Onion HA Engine                           |
          .h  dds.           +-------------------------------------------+
         /h+  ddhdo.         | Version: {build:32} |
       :yh/  -ddh/ ds.       | Release: {__date__:32} |
     -yds`  `hdddy  dd+      | Author:  {__author__:32} |
    /ddy    yddddd+  dd+     | License: {__license__:32} |
Example #9
class AccurateRip:
    # BINARY = PosixPath("/home/bas/NerdProjecten/cdrip/accuraterip/accuraterip")
    BINARY = PosixPath("/home/bas/pycharm/cdrip/accuraterip/accuraterip")
    PREVIOUS_TRACK_FRAMES = (5880 // 2)
    NEXT_TRACK_FRAMES = (5880 // 2)

    def __init__(self, disc: cd.Disc, wav_file: PathLike):
        self._ar_results: Optional[AccurateRipResults] = None
        if not self.BINARY.exists():
            raise FileNotFoundError(
                f"Cannot find accuraterip binary at f{self.BINARY}")
        self._disc = disc
        self._wav = wav_file

    @property
    def offset(self) -> int:
        return self._disc.cdplayer.offset

    @property
    def ar_results(self) -> Optional[AccurateRipResults]:
        if self._ar_results is None:
            self.ar_lookup()
        return self._ar_results

    def checksum_disc(
        self
    ) -> Dict[cd.TrackNr, Dict[AccurateRipConfidence, AccurateRipTrackID1]]:
        return self.checksum_disc_audiotools()

    def checksum_track(self,
                       track: cd.Track) -> Dict[int, AccurateRipTrackID1]:
        return self._checksum_track_audiotools(track)

    def _checksum_track_audiotools(
            self, track: cd.Track) -> Dict[int, AccurateRipTrackID1]:
        file = audiotools.open(self._wav)
        # just doublechecking
        if not isinstance(file, audiotools.WaveAudio) \
           or not file.supports_to_pcm() \
           or file.channels() != 2 \
           or file.sample_rate() != cd.CDA_SAMLES_PER_SEC \
           or file.bits_per_sample() != cd.CDA_BITS_PER_SAMPLE:
            raise AccurateRipException(
                "Input file doesn't look like a CDA rip")

        # most of this is taken from https://github.com/tuffy/python-audio-tools/blob/master/trackverify#L244
        reader = file.to_pcm()
        if not hasattr(reader, "seek") or not callable(reader.seek):
            raise AccurateRipException("Can't seek in file")

        # we start reading a bit before the track, in order to try out different offsets for the accuraterip checksums
        # the reader below will take care of padding if this is negative
        offset = track.first_sample - self.PREVIOUS_TRACK_FRAMES

        if offset > 0:
            offset -= reader.seek(offset)

        checksummer = audiotools.accuraterip.Checksum(
            total_pcm_frames=track.length_samples,
            sample_rate=cd.CDA_SAMLES_PER_SEC,
            is_first=track.is_first,
            is_last=track.is_last,
            pcm_frame_range=self.PREVIOUS_TRACK_FRAMES + 1 +
            self.NEXT_TRACK_FRAMES,
            accurateripv2_offset=self.PREVIOUS_TRACK_FRAMES)

        window_reader = audiotools.PCMReaderWindow(
            reader, offset, self.PREVIOUS_TRACK_FRAMES + track.length_samples +
            self.NEXT_TRACK_FRAMES)

        audiotools.transfer_data(window_reader.read, checksummer.update)

        checksums_v1 = checksummer.checksums_v1()

        crc1_by_offset: Dict[int, AccurateRipTrackID1] = {
            i: AccurateRipTrackID1(c)
            for i, c in enumerate(checksums_v1, -self.PREVIOUS_TRACK_FRAMES)
        }

        return crc1_by_offset

    def checksum_disc_audiotools(
        self
    ) -> Dict[cd.TrackNr, Dict[AccurateRipConfidence, AccurateRipTrackID1]]:
        results_by_track = {
            t.num: self.checksum_track(track=t)
            for t in self._disc.tracks
        }
        return results_by_track

    def ar_lookup(self) -> None:
        accuraterip_id = self._disc.id_accuraterip()

        #print(f"Fetching {accuraterip_id.url}")
        print("  Looking up disc in accuraterip database... ", end='')
        response = urllib.request.urlopen(accuraterip_id.url)
        if response.status == 200:
            print("found!")
            self._ar_results = AccurateRipResults.parse_accuraterip_bin(
                response.read(), accuraterip_id)
        elif response.status == 404:
            print("not found :(")
            self._ar_results = None
        else:
            raise AccurateRipException(
                f"Couldn't fetch accuraterip entry: {response.status}: {response.reason}"
            )

    def find_confidence_track(
            self, track: cd.TrackNr) -> Tuple[AccurateRipConfidence, int]:
        # list of (crc1,confidence) tuples
        ar_crcs = self.ar_results.get_track_crc1(track)
        # dict of {offset: crc1} pairs
        track_crcs = self.checksum_track(self._disc.tracks[track - 1])

        for ar_crc1, confidence in ar_crcs:
            for offset, track_crc in track_crcs.items():
                if ar_crc1 == track_crc:
                    return confidence, offset

        return 0, 0

    def find_confidence(
            self) -> Optional[Dict[cd.TrackNr, AccurateRipConfidence]]:
        print("Matching disc with Acucuraterip database...")

        if self.ar_results is None:
            print("No accuraterip results found for disc")
            return None

        confidences: Dict[cd.TrackNr, AccurateRipConfidence] = dict()
        for t in self._disc.track_nums():
            confidence, offset = self.find_confidence_track(t)
            confidences[t] = confidence

            print(f"  - Track {t:-2d}: ", end="")
            if confidence > 0:
                print(
                    f"found matching crc at offset {offset} with confidence {confidence}"
                )
            else:
                print(f"no matching crc found for track {t}")

        return confidences
Example #10
with open('log.txt', mode='w') as log:

    for i in range(1, args.forks + 1):
        pid = os.fork()
        if (pid != 0):
            child_pids.append(pid)
            child, code = os.waitpid(0, os.WNOHANG)
            if child != 0:
                print(
                    f'child with pid={child} exited with code={code}. parent with pid id={os.getpid()} exited'
                )
                exit(2)
        else:
            parent_pid = os.getppid()
            parent_path = PosixPath(f'/proc/{parent_pid}')
            for j in range(args.iterations * i):
                if not parent_path.exists():
                    print(
                        f'parent with pid={parent_pid} exited. child with pid={os.getpid()} exited'
                    )
                    exit(1)
                print(f'fork = {i}\titeration = {j}\tmessage = {args.message}',
                      file=log)
                sleep(1)
            exit(0)

while True:
    child, code = os.waitpid(0, os.WNOHANG)
    if child != 0:
        print(
Example #11
def job_transfer_ks2(probe_path):

    assert (isinstance(probe_path, str))

    def _get_volume_usage_percentage(vol):
        cmd = f'df {vol}'
        res, _ = _run_command(cmd)
        size_list = re.split(' +', res.split('\n')[-1])
        per_usage = int(size_list[4][:-1])
        return per_usage

    # First check disk availability
    space = _get_volume_usage_percentage('/mnt/s0')
    # If we are less than 80% full we can transfer more stuff
    if space < 80:
        # Transfer data from flatiron to s3
        cmd = f'ssh -i ~/.ssh/mayo_alyx.pem -p {FLATIRON_PORT} ' \
              f'{FLATIRON_USER}@{FLATIRON_HOST} ./transfer_to_aws.sh {probe_path}'
        result, error = _run_command(cmd)

        # Check that command has run as expected and output info to logger
        if not result:
            _logger.error(
                f'{probe_path}: Could not transfer data from FlatIron to s3 \n'
                f'Error: {error}')
            return
        else:
            _logger.info(f'{probe_path}: Data transferred from FlatIron to s3')

        # Transfer data from s3 to /mnt/s0/Data on aws
        session = str(PosixPath(probe_path).parent.parent)
        cmd = f'aws s3 sync s3://ibl-ks2-storage/{session} "/mnt/s0/Data/{session}"'
        result, error = _run_command(cmd)

        # Check that command has run as expected and output info to logger
        if not result:
            _logger.error(
                f'{probe_path}: Could not transfer data from s3 to aws \n'
                f'Error: {error}')
            return
        else:
            _logger.info(f'{probe_path}: Data transferred from s3 to aws')

        # Rename the files to get rid of eid associated with each dataset
        session_path = Path(root_path).joinpath(session)
        for file in session_path.glob('**/*'):
            if len(Path(file.stem).suffix) == 37:
                file.rename(
                    Path(file.parent,
                         str(Path(file.stem).stem) + file.suffix))
                _logger.info(
                    f'Renamed dataset {file.stem} to {str(Path(file.stem).stem)}'
                )
            else:
                _logger.warning(f'Dataset {file.stem} not renamed')
                continue

        # Create a sort_me.flag
        cmd = f'touch /mnt/s0/Data/{session}/sort_me.flag'
        result, error = _run_command(cmd)
        _logger.info(f'{session}: sort_me.flag created')

        # Remove files from s3
        cmd = f'aws s3 rm --recursive s3://ibl-ks2-storage/{session}'
        result, error = _run_command(cmd)
        if not result:
            _logger.error(f'{session}: Could not remove data from s3 \n'
                          f'Error: {error}')
            return
        else:
            _logger.info(f'{session}: Data removed from s3')

        return
Example #12
    def test_dummy_game_instance_calls(self,
        mocked_mkdir: Mock,
        mocked_symlink_to: Mock,
        mocked_copy: Mock,
        mocked_rmtree: Mock,
        mocked_run: Mock) -> None:

        # Arrange
        unittest.util._MAX_LENGTH=999999999 # :snake:

        # Act
        with DummyGameInstance(
            Path('/game-instance'),
            Path('/ckan.exe'),
            Path('/repo/metadata.tar.gz'),
            GameVersion('1.8.1'),
            [GameVersion('1.8.0')],
            Path('/cache')):

            pass

        # Assert
        self.assertEqual(mocked_mkdir.mock_calls, [call()])
        self.assertEqual(mocked_symlink_to.mock_calls, [
            call(PosixPath('/cache'))
        ])
        self.assertEqual(mocked_copy.mock_calls, [
            call(PosixPath('/game-instance/CKAN/registry.json'),
                 PosixPath('/tmp/registry.json'))
        ])
        self.assertEqual(mocked_rmtree.mock_calls, [
            call(PosixPath('/game-instance'))
        ])
        self.assertEqual(mocked_run.mock_calls, [
            call(['mono', PosixPath('/ckan.exe'), 'instance', 'fake',
                  '--set-default', '--headless', 'dummy',
                  PosixPath('/game-instance'), '1.8.1',
                  '--MakingHistory', '1.1.0', '--BreakingGround', '1.0.0'],
                 capture_output=True),
            call(['mono', PosixPath('/ckan.exe'), 'compat', 'add', '1.8.0'],
                 capture_output=True),
            call(['mono', PosixPath('/ckan.exe'), 'cache', 'set', PosixPath('/cache'), '--headless'],
                 capture_output=True),
            call(['mono', PosixPath('/ckan.exe'), 'cache', 'setlimit', '5000'],
                 capture_output=True),
            call(['mono', PosixPath('/ckan.exe'), 'repo', 'add',
                  'local', 'file:///repo/metadata.tar.gz'],
                 capture_output=True),
            call(['mono', PosixPath('/ckan.exe'), 'update'],
                 capture_output=True),
            call(['mono', PosixPath('/ckan.exe'), 'instance', 'forget', 'dummy'],
                 capture_output=True)
        ])
Example #13
import subprocess
from threading import Thread, Semaphore, Event
from queue import Queue
from time import sleep
import shlex
from PIL import Image, ImageDraw
import sys
from shutil import copyfile

RED = '\033[31m'
GREEN = '\033[32m'
BLUE = '\033[34m'
YELLOW = '\033[33m'
OFF = '\033[0m'

DIST_DIR = PosixPath('dist/Numix-HIDPI')
CURSOR_DIST = DIST_DIR.joinpath('cursors/')

BUILD_DIR = PosixPath('build/')
CURSOR_OUTPUT = BUILD_DIR.joinpath('cursor/')
ICON_OUTPUT = BUILD_DIR.joinpath('icons/')

CMD_TMPL = 'inkscape %s -o %s --export-dpi %s'

DPI = OrderedDict([(90, 24), (120, 30), (160, 40), (180, 45), (200, 50),
                   (220, 55), (240, 60), (320, 80)])


class WatchedProcess(Thread):
    """
		A light wrapper around a Popen object
Example #14
# APPLY -10
# HELP -11

################################################################
# Pathlib, cross-platform replacement for os.path and much more.
################################################################

>>> from pathlib import Path, PosixPath

# For paths in Python 3-only projects, consider using pathlib rather than os.path
# https://docs.python.org/3/library/pathlib.html#pathlib.Path
Path.home()    # the user's homedir
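# e.g. os.path.join(os.path.expanduser('~'), 'Music') becomes Path.home() / 'Music'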

>>> rock =  Path('~', 'Music', 'Rock').expanduser()
>>> rock
PosixPath('/home/akkana/Music/Rock')
>>> g = rock.rglob('*.mp3')
>>> g
<generator object Path.rglob at 0x7f630d9e48d0>
>>> list(g)
# ... list of PosixPath objects
# rglob is recursive, glob is nonrecursive unless the pattern starts with **/
# which means “this directory and all subdirectories, recursively”.
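# For instance, these two produce the same matches (illustrative check):
>>> sorted(rock.glob('**/*.mp3')) == sorted(rock.rglob('*.mp3'))
True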

>>> p = Path()
>>> p
PosixPath('.')
>>> p.resolve()
PosixPath('/home/username/pathlib')

# Other methods include .mkdir(), .rmdir(), .unlink(), .rename(), .exists(),
Example #15
    def test_dir(self, sorting_pictures):
        result = sorting_pictures.search_directory('sample-images')

        assert sorted(result) == sorted([
            PosixPath('sample-images/IMG_20171022_010203_01.jpg'),
            PosixPath('sample-images/IMG_20171022_124203.unknown_suffix'),
            PosixPath('sample-images/IMG_NO_PARSE.jpg'),
            PosixPath('sample-images/VID'),
            PosixPath('sample-images/metadata-copy.jpg'),
            PosixPath('sample-images/metadata.jpg'),
            PosixPath('sample-images/no-m'),
            PosixPath('sample-images/no-metadata'),
            PosixPath(
                'sample-images/no-metadata/20170112_110943-ANIMATION.gif'),
            PosixPath('sample-images/no-metadata/20171022_010203.jpg'),
            PosixPath('sample-images/no-metadata/IMG_20171022_124203.jpg'),
            PosixPath('sample-images/no-metadata/IMG_20171022_124203_01.jpg'),
            PosixPath('sample-images/no-metadata/IMG_20171104_104157.jpg'),
            PosixPath('sample-images/no-metadata/IMG_20171104_104157_01.jpg'),
            PosixPath('sample-images/no-metadata/IMG_20171104_104158~.jpg'),
            PosixPath('sample-images/no-metadata/IMG_20181001_124203.gif'),
            PosixPath('sample-images/no-metadata/IMG~20171104~104159~.jpg'),
            PosixPath(
                'sample-images/no-metadata/Screenshot_20171007-143321.png'),
            PosixPath('sample-images/no-metadata/VID_20180724_173611.mp4'),
            PosixPath('sample-images/no-metadata.jpg')
        ])
Example #16
import sys
from pathlib import Path, PosixPath, PurePath

#dir_csv = "/home/lgro/git/ics2csv/temp/calendars/adm.office/result"
#dir_ics = "/home/lgro/git/ics2csv/temp/calendars/adm.office/"
format_date = "%m/%d/%Y"
format_time = "%H:%M"
format_date_time = f"{format_date} {format_time}"

if len(sys.argv) == 3:
    dir_ics = sys.argv[1]
    dir_csv = sys.argv[2]
else:
    print("Two input parameters are required: source directory, destination directory")
    exit(1)

dir_csv = PosixPath(dir_csv).expanduser()
dir_ics = PosixPath(dir_ics).expanduser()

if dir_ics.is_dir():
    print(f"Tratando arquivos ics do diretorio: {dir_ics}")
else:
    print("Diretorio com arquivos ics nao existe")
    exit(1)

if not dir_csv.is_dir():
    print(
        f"Directory for the csv files does not exist; it will be created: {dir_csv}"
    )
    try:
        dir_csv.mkdir()
    except OSError:
Example #17
    def test_successful_run_move(self, sorting_pictures, tmp_path):
        src = tmp_path / 'src'
        dest = tmp_path / 'dest'

        if src.exists():
            shutil.rmtree(src)
        if dest.exists():
            shutil.rmtree(dest)

        shutil.copytree('sample-images', src, symlinks=True)

        sorting_pictures.sort_images(src, dest, move=True, run=True)

        result = sorting_pictures.search_directory(dest)
        result = [p.relative_to(tmp_path) for p in result]

        assert sorted(result) == sorted([
            PosixPath('dest/2017-01'),
            PosixPath('dest/2017-01/IMG_20170112_110943.gif'),
            PosixPath('dest/2017-10'),
            PosixPath('dest/2017-10/IMG_20171007_143321.png'),
            PosixPath('dest/2017-10/IMG_20171022_010203.jpg'),
            PosixPath('dest/2017-10/IMG_20171022_124203-1.jpg'),
            PosixPath('dest/2017-10/IMG_20171022_124203.jpg'),
            PosixPath('dest/2017-11'),
            PosixPath('dest/2017-11/IMG_20171104_104157.jpg'),
            PosixPath('dest/2017-11/IMG_20171104_104158.jpg'),
            PosixPath('dest/2017-11/IMG_20171104_104159.jpg'),
            PosixPath('dest/2018-07'),
            PosixPath('dest/2018-07/VID_20180724_173611.mp4'),
            PosixPath('dest/2018-10'),
            PosixPath('dest/2018-10/IMG_20181001_124203.gif')
        ])

        result = sorting_pictures.search_directory(src)
        result = [p.relative_to(tmp_path) for p in result]
        assert sorted(result) == sorted([
            PosixPath('src/metadata-copy.jpg'),
            PosixPath('src/IMG_20171022_010203_01.jpg'),
            PosixPath('src/VID'),
            PosixPath('src/IMG_20171022_124203.unknown_suffix'),
            PosixPath('src/IMG_NO_PARSE.jpg'),
            PosixPath('src/no-m'),
            PosixPath('src/no-metadata.jpg'),
            PosixPath('src/metadata.jpg'),
            PosixPath('src/no-metadata'),
        ])

        log = sorting_pictures.log
        log['parse'] = [p.relative_to(tmp_path) for p in log['parse']]
        log['suffix'] = [p.relative_to(tmp_path) for p in log['suffix']]
        log['collisions'] = [(p_s.relative_to(tmp_path),
                              p_d.relative_to(tmp_path))
                             for (p_s, p_d) in log['collisions']]
        expected = {
            'collisions':
            [(PosixPath('src/IMG_20171022_010203_01.jpg'),
              PosixPath('dest/2017-10/IMG_20171022_010203.jpg'))],
            'parse': [
                PosixPath('src/metadata-copy.jpg'),
                PosixPath('src/IMG_NO_PARSE.jpg'),
                PosixPath('src/no-metadata.jpg'),
                PosixPath('src/metadata.jpg'),
            ],
            'suffix': [
                PosixPath('src/VID'),
                PosixPath('src/IMG_20171022_124203.unknown_suffix')
            ]
        }

        log = {k: sorted(v) for k, v in log.items()}
        expected = {k: sorted(v) for k, v in expected.items()}
        assert log == expected
Example #18
def train_single(
    datadir,
    outdir,
    pre_exec,
    test_size,
    early_stop,
    use_sklearn,
    use_xgboost,
    learning_rate,
    num_leaves,
    min_child_samples,
    max_depth,
    reg_lambda,
    auto_region,
):
    """Execute single training round."""
    if pre_exec is not None:
        exec(PosixPath(pre_exec).read_text())

    from tdub.ml_train import single_training
    import pandas as pd
    import numpy as np

    datadir = PosixPath(datadir)
    df = pd.read_hdf(datadir / "df.h5", "df")
    y = np.load(datadir / "labels.npy")
    w = np.load(datadir / "weights.npy")
    df.selection_used = (
        datadir / "selection.txt"
    ).read_text().strip()
    extra_sum = {
        "region": PosixPath(datadir / "region.txt").read_text().strip(),
        "nlo_method": PosixPath(datadir / "nlo_method.txt").read_text().strip(),
    }

    if auto_region:
        from tdub.ml_train import default_bdt_parameters
        train_axes = default_bdt_parameters(extra_sum["region"])
    else:
        train_axes = dict(
            learning_rate=learning_rate,
            num_leaves=num_leaves,
            min_child_samples=min_child_samples,
            max_depth=max_depth,
            reg_lambda=reg_lambda,
        )

    single_training(
        df,
        y,
        w,
        train_axes,
        outdir,
        test_size=test_size,
        early_stopping_rounds=early_stop,
        extra_summary_entries=extra_sum,
        use_sklearn=use_sklearn,
        use_xgboost=use_xgboost,
    )
Example #19
 def getLogPath(self):
     return str(PosixPath('~/work/uilogs/').expanduser())
Example #20
def train_scan(
    datadir,
    workspace,
    pre_exec,
    early_stop,
    test_size,
    overwrite,
    and_submit,
):
    """Perform a parameter scan via condor jobs.

    DATADIR points to the input ROOT files; training is performed on
    the REGION, and all output is saved to WORKSPACE.

    $ tdub train scan /data/path 2j2b scan_2j2b

    """
    if pre_exec is not None:
        exec(PosixPath(pre_exec).read_text())

    from tdub.batch import create_condor_workspace
    import tdub.config
    import itertools

    ws = create_condor_workspace(workspace, overwrite=overwrite)
    (ws / "res").mkdir()

    runs = []
    i = 0
    if pre_exec is None:
        pre_exec = "_NONE"
    else:
        pre_exec = str(PosixPath(pre_exec).resolve())

    pd = tdub.config.DEFAULT_SCAN_PARAMETERS
    itr = itertools.product(
        pd.get("max_depth"),
        pd.get("num_leaves"),
        pd.get("learning_rate"),
        pd.get("min_child_samples"),
        pd.get("reg_lambda"),
    )

    log.info("Scan grid:")
    log.info(" - max_depth: {}".format(pd.get("max_depth")))
    log.info(" - num_leaves: {}".format(pd.get("num_leaves")))
    log.info(" - learning_rate: {}".format(pd.get("learning_rate")))
    log.info(" - min_child_samples: {}".format(pd.get("min_child_samples")))
    log.info(" - reg_lambda: {}".format(pd.get("reg_lambda")))

    for (max_depth, num_leaves, learning_rate, min_child_samples, reg_lambda) in itr:
        suffix = "{}-{}-{}-{}-{}".format(
            max_depth, num_leaves, learning_rate, min_child_samples, reg_lambda,
        )
        outdir = ws / "res" / f"{i:04d}_{suffix}"
        arglist = (
            "{} {} -p {} -s {} "
            "--learning-rate {} "
            "--num-leaves {} "
            "--min-child-samples {} "
            "--max-depth {} "
            "--reg-lambda {} "
            "--early-stop {} "
        ).format(
            datadir,
            outdir,
            pre_exec,
            test_size,
            learning_rate,
            num_leaves,
            min_child_samples,
            max_depth,
            reg_lambda,
            early_stop,
        )
        arglist = arglist.replace("-p _NONE ", "")
        runs.append(arglist)
        i += 1

    with (ws / "run.sh").open("w") as outscript:
        print("#!/bin/bash\n\n", file=outscript)
        for run in runs:
            print(f"tdub train single {run}\n", file=outscript)
    os.chmod(ws / "run.sh", 0o755)

    import pycondor

    condor_dag = pycondor.Dagman(name="dag_train_scan", submit=str(ws / "sub"))
    condor_job_scan = pycondor.Job(
        name="job_train_scan",
        universe="vanilla",
        getenv=True,
        notification="Error",
        extra_lines=["notify_user = [email protected]"],
        executable=shutil.which("tdub"),
        submit=str(ws / "sub"),
        error=str(ws / "err"),
        output=str(ws / "out"),
        log=str(ws / "log"),
        dag=condor_dag,
    )
    for run in runs:
        condor_job_scan.add_arg(f"train single {run}")
    condor_job_check = pycondor.Job(
        name="job_train_check",
        universe="vanilla",
        getenv=True,
        notification="Error",
        extra_lines=["notify_user = [email protected]"],
        executable=shutil.which("tdub"),
        submit=str(ws / "sub"),
        error=str(ws / "err"),
        output=str(ws / "out"),
        log=str(ws / "log"),
        dag=condor_dag,
    )
    condor_job_check.add_arg(f"train check {ws}")
    condor_job_check.add_parent(condor_job_scan)

    if and_submit:
        condor_dag.build_submit()
    else:
        condor_dag.build()

    # log.info(f"prepared {len(runs)} jobs for submission")
    # with (ws / "condor.sub").open("w") as f:
    #     condor_preamble(ws, shutil.which("tdub"), memory="2GB", GetEnv=True, to_file=f)
    #     for run in runs:
    #         add_condor_arguments(f"train-single {run}", f)
    # if and_submit:
    #     condor_submit(workspace)

    return 0
Example #21
# Variations of PurePath.
from pathlib import PurePath, Path
from pathlib import PurePosixPath, PureWindowsPath
from pathlib import WindowsPath, PosixPath

diretorio = Path('/usr/bin')
print(diretorio.parts)

diretorio = PurePath('/usr/bin')
print(diretorio.parts)

diretorio = PurePosixPath('/usr/bin')
print(diretorio.parts)

diretorio = PureWindowsPath('/usr/bin')
print(diretorio.parts)

try:
    diretorio = PosixPath('/usr/bin')
    print(diretorio.parts)
except NotImplementedError as erro:
    print('PosixPath not implemented on this system.')

try:
    diretorio = WindowsPath('/usr/bin')
    print(diretorio.parts)
except NotImplementedError as erro:
    print('WindowsPath not implemented on this system.')
Example #22
 def __init__(self, store_path: str) -> None:
     self._store_path = PosixPath(store_path)
Example #23
    def __init__(self, args, append_log_file=False):
        output_dir_parent = Path('simulation_results')
        if not output_dir_parent.exists():
            output_dir_parent.mkdir()

        self.args = args

        mode = 'o3'
        if args.in_order:
            mode = 'inorder'
        # elif args.invisispec:
        #     mode = 'invisispec_{}'.format(args.scheme)
        elif args.flag_config != 'empty':
            mode = args.flag_config

        self.summary_path = output_dir_parent / '{}_{}_summary.json'.format(
            args.bench, mode)

        self.summary = defaultdict(dict)
        if self.summary_path.exists() and not args.force_rerun:
            print('\tLoading existing results.')
            with self.summary_path.open() as f:
                raw_dict = json.load(f)
                for k, v in raw_dict.items():
                    self.summary[k] = v
        elif self.summary_path.exists() and args.force_rerun:
            print('\tIgnoring old summary file to force rerun.')

        self.summary['mode'] = mode
        self.summary['bench'] = args.bench
        self.summary['successful_checkpoints'] = 0
        self.summary['failed_checkpoints'] = 0

        assert args.checkpoint_dir is not None

        chkdir = Path(args.checkpoint_dir)
        dirents = Utils.get_directory_entries_by_time(chkdir)
        if 'checkpoints' in self.summary:
            self.chkpts = [x for x in dirents if x.is_dir()]
            rm_count = 0
            for chk, status in self.summary['checkpoints'].items():
                chk_path = PosixPath(chk)
                if chk_path in self.chkpts and status != 'not run':
                    rm_count += 1
                    self.chkpts.remove(chk_path)
                    if status == 'failed':
                        self.summary['failed_checkpoints'] += 1
                    elif status == 'successful':
                        self.summary['successful_checkpoints'] += 1
                elif chk_path not in self.chkpts and status != 'not run':
                    if status == 'failed':
                        self.summary['failed_checkpoints'] += 1
                    elif status == 'successful':
                        self.summary['successful_checkpoints'] += 1

            print('\tRemoved {} checkpoints from consideration.'.format(
                rm_count))
        else:
            self.chkpts = [x for x in dirents if x.is_dir()]

        exp_args = {}

        invalid_counter = 0
        # Always update this, for it could change!
        self.summary['total_checkpoints'] = len(self.chkpts)

        self.result_files = {}
        for chkpt in self.chkpts:
            pmem_file = chkpt / 'system.physmem.store0.pmem'
            if not pmem_file.exists():
                invalid_counter += 1
                self.summary['checkpoints'][str(chkpt)] = 'invalid'
                #print('{} -- invalid checkpoint, skipping'.format(str(chkpt)))
                continue
            self.summary['checkpoints'][str(chkpt)] = 'not run'
            output_dir = output_dir_parent / '{}_{}_{}'.format(
                args.bench, mode, str(chkpt.name))
            arg_list = [
                '--bench', args.bench, '--suite', args.suite, '--warmup-insts',
                str(args.warmup_insts), '--reportable-insts',
                str(args.reportable_insts), '--start-checkpoint',
                str(chkpt), '--output-dir',
                str(output_dir), '--flag-config',
                str(args.flag_config)
            ]
            if args.in_order:
                arg_list += ['--in-order']
            # if args.invisispec:
            #     arg_list += ['--invisispec', '--scheme', args.scheme]
            exp_args[str(chkpt)] = arg_list

            result_file = output_dir / 'res.json'
            self.result_files[str(chkpt)] = result_file

        if 'invalid_counter' not in self.summary:
            self.summary['invalid_checkpoints'] = invalid_counter

        if invalid_counter == len(self.chkpts):
            raise Exception('No valid checkpoints to simulate with!')
        elif invalid_counter > 0:
            print('Skipping {} invalid checkpoints'.format(invalid_counter))

        self.num_checkpoints = len(
            exp_args) + self.summary['successful_checkpoints']
        if args.num_checkpoints is not None:
            self.num_checkpoints = min(args.num_checkpoints,
                                       self.num_checkpoints)
            if self.num_checkpoints < args.num_checkpoints:
                print(
                    'Warning: Requested {} checkpoints, but only {} are available.'
                    .format(args.num_checkpoints, self.num_checkpoints))

        self.all_proc_args = exp_args
        self.max_procs = int(args.pool_size)
        self.log_file = args.log_file
        self.append = append_log_file
        self.timeout_seconds = (60.0 * 60.0)
Example #24
class KassLocustP3Desktop(AbstractKassLocustP3):
    """A class for running KassLocust on a desktop."""

    _working_dir_container = PosixPath('/') / 'workingdir'
    _command_script_name = 'locustcommands.sh'
    _container = CONFIG.container
    _max_workers = int(CONFIG.desktop_parallel_jobs)

    def __init__(self, working_dir, direct=True):
        """
        Parameters
        ----------
        working_dir : str
            The string for the path of the working directory
        """

        AbstractKassLocustP3.__init__(self, working_dir, direct)

    def __call__(self, sim_config_list):
        """This method overrides :meth:`AbstractKassLocustP3.__call__`.
        
        Runs a list of simulation jobs in parallel.
        
        Parameters
        ----------
        sim_config_list : list
            A list of SimConfig objects
        """

        print('Running jobs in Locust')
        with cf.ThreadPoolExecutor(max_workers=self._max_workers) as executor:

            futures = [
                executor.submit(self._submit, sim_config)
                for sim_config in sim_config_list
            ]

            for future in tqdm(cf.as_completed(futures), total=len(futures)):
                future.result()

    def _submit(self, sim_config: SimConfig):
        #Submit the job with the given SimConfig
        #Creates all the necessary configuration files, directories and the
        #json output

        output_dir = self._working_dir / sim_config.sim_name
        output_dir.mkdir(parents=True, exist_ok=True)

        locust_file = output_dir / LOCUST_CONFIG_NAME
        kass_file = output_dir / KASS_CONFIG_NAME
        config_dump = output_dir / SIM_CONFIG_NAME

        sim_config.make_config_file(locust_file, kass_file)
        sim_config.to_json(config_dump)
        self._gen_command_script(output_dir)

        cmd = self._assemble_command(output_dir)

        with open(output_dir / 'log.out',
                  'w+') as log, open(output_dir / 'log.err', 'w+') as err:
            p = subprocess.Popen(cmd, shell=True, stdout=log, stderr=err)

        p.wait()
        #fix stty; for some reason the multithreading with docker breaks the shell
        subprocess.Popen('stty sane', shell=True).wait()

    def _assemble_command(self, output_dir):
        #Assemble the docker command that runs the KassLocust simulation in the
        #p8compute container

        docker_run = 'docker run -it --rm'

        bash_command = ('"' +
                        str(OUTPUT_DIR_CONTAINER / self._command_script_name) +
                        ' ' + str(OUTPUT_DIR_CONTAINER / LOCUST_CONFIG_NAME) +
                        '"')

        docker_command = '/bin/bash -c ' + bash_command

        # share_working_dir = _gen_shared_dir_string(self._working_dir,
        #                                     self._working_dir_container)

        share_output_dir = _gen_shared_dir_string(output_dir,
                                                  OUTPUT_DIR_CONTAINER)

        share_hexbug_dir = _gen_shared_dir_string(HEXBUG_DIR,
                                                  HEXBUG_DIR_CONTAINER)

        # cmd = _char_concatenate(' ', docker_run, share_working_dir,
        #                             share_output_dir, share_hexbug_dir,
        #                             self._container, docker_command)
        cmd = _char_concatenate(' ', docker_run, share_output_dir,
                                share_hexbug_dir, self._container,
                                docker_command)

        return cmd

    def _gen_command_script(self, output_dir):
        #Generate the bash script with the commands for running locust
        #This script will be called from inside the container

        shebang = '#!/bin/bash'
        p8_env = _char_concatenate(' ', 'source',
                                   str(self._p8_compute_dir / 'setup.sh'))
        kasper_env = _char_concatenate(
            ' ', 'source', str(self._p8_locust_dir / 'bin' / 'kasperenv.sh'))
        locust = 'LocustSim config=$1'

        commands = _char_concatenate('\n', shebang, p8_env, kasper_env, locust)

        script = output_dir / self._command_script_name
        with open(script, 'w') as out_file:
            out_file.write(commands)

        subprocess.Popen('chmod +x ' + str(script), shell=True).wait()
Example #25
def convert_to_localfs(apps, schema_editor):
    Project = apps.get_model("pootle_project.Project")
    Store = apps.get_model("pootle_store.Store")
    StoreFS = apps.get_model("pootle_fs.StoreFS")
    Config = apps.get_model("pootle_config.Config")
    Language = apps.get_model("pootle_language.Language")
    ContentType = apps.get_model("contenttypes.ContentType")
    project_ct = ContentType.objects.get_for_model(Project)
    old_translation_path = settings.POOTLE_TRANSLATION_DIRECTORY

    for project in Project.objects.exclude(treestyle="pootle_fs"):
        logger.debug("Converting project '%s' to pootle fs", project.code)
        proj_trans_path = str(PosixPath().joinpath(old_translation_path,
                                                   project.code))
        proj_stores = Store.objects.filter(
            translation_project__project=project).exclude(file="").exclude(
                obsolete=True)
        old_treestyle, old_path = (_detect_treestyle_and_path(
            Config, Language, project, proj_trans_path)
                                   if project.treestyle in ["auto", "gnu"] else
                                   (project.treestyle, None))
        _set_project_config(Language, Config, project_ct, project)
        project.treestyle = "pootle_fs"
        project.save()

        if project.disabled:
            continue
        if not os.path.exists(proj_trans_path):
            logger.warn(
                "Missing project ('%s') translation directory '%s', "
                "skipped adding tracking", project.code, proj_trans_path)
            continue
        store_fs = StoreFS.objects.filter(
            store__translation_project__project=project)
        store_fs.delete()
        sfs = []
        templates = []
        for store in proj_stores:
            filepath = str(store.file)[len(project.code):]
            fullpath = str(PosixPath().joinpath(proj_trans_path,
                                                filepath.lstrip("/")))
            if not os.path.exists(fullpath):
                logger.warn("No file found at '%s', not adding tracking",
                            fullpath)
                continue
            if store.is_template and old_treestyle == "gnu":
                templates.append(store)
            sfs.append(
                StoreFS(project=project,
                        store=store,
                        path=str(filepath),
                        pootle_path=store.pootle_path,
                        last_sync_hash=str(os.stat(fullpath).st_mtime),
                        last_sync_revision=store.last_sync_revision,
                        last_sync_mtime=store.file_mtime))
        if len(sfs):
            StoreFS.objects.bulk_create(sfs, batch_size=1000)
        if old_treestyle == "gnu" and len(templates) == 1:
            template = templates[0]
            template_name, __ = posixpath.splitext(template.name)
            if template_name != "templates":
                try:
                    mapping = Config.objects.get(
                        content_type=project_ct,
                        object_pk=project.pk,
                        key="pootle.core.lang_mapping")
                except Config.DoesNotExist:
                    mapping = {}
                mapping[template_name] = "templates"
                Config.objects.update_or_create(content_type=project_ct,
                                                object_pk=project.pk,
                                                key="pootle.core.lang_mapping",
                                                defaults=dict(value=mapping))
        logger.debug("Tracking added for %s/%s stores in project '%s'",
                     len(sfs), proj_stores.count(), project.code)
        fs_temp = os.path.join(settings.POOTLE_FS_WORKING_PATH, project.code)
        dirsync.sync(str(proj_trans_path),
                     fs_temp,
                     "sync",
                     create=True,
                     purge=True,
                     logger=logging.getLogger(dirsync.__name__))
Example #26
import sys
from pathlib import PosixPath

marabou_path = PosixPath(r"/cs/labs/guykatz/yoni_mantzur/marabou")

sys.path.append(str(marabou_path))

import os

# marabou_path = Path(r'/mnt/c/Users/t-yomant/lab/Marabou')

path_to_sigmoids = marabou_path / r'resources/tf/frozen_graph/sigmoids/'
path_to_experiments = marabou_path / r'maraboupy/examples/networks/sigmoids'

from maraboupy import MarabouCore, MarabouNetwork, Marabou, MarabouUtils

import numpy as np

try:
    exp_num = max(
        map(
            int,
            map(
                lambda y: ''.join(y),
                map(
                    lambda f: filter(lambda c: c.isdigit(), f),
                    filter(lambda dir: 'experiment_' in dir,
                           os.listdir(path_to_experiments)))))) + 1
except ValueError:
    exp_num = 1
Example #27
import pathlib
from pathlib import PosixPath
import os

from csqa.version import VERSION as __version__


DATA_DIR = (
    os.getenv('CSQA_DATA') or
    os.path.join(PosixPath(__file__).absolute().parents[1].as_posix(), 'data')
)
Example #28
class isri_unlv_config:
    MAIN_DIR = PosixPath('../data/isri_unlv/')
    FILE_DIR = PosixPath('../data/bbox/isri_unlv_bbox.pickle')
    cat2id = cat2id
Example #29
 def test_logfile_path_absolute(self, mock_open, mock_stdout):
     self.subject(["streamlink", "--logfile", "/foo/bar"])
     self.write_file_and_assert(mock_mkdir=PosixPath("/foo").mkdir,
                                mock_write=mock_open("/foo/bar", "a").write,
                                mock_stdout=mock_stdout)
Example #30
 def absolute(self):
     return PosixPath("fakedir/.env")