Example #1
def apply_patchqueue(base_repo, pq_repo, prefix):
    """
    Link and then apply a patchqueue repository to a source repository
    """
    status_path = Path(pq_repo.working_dir, prefix, 'status')
    patches_link = Path(base_repo.git_dir, 'patches',
                        base_repo.active_branch.name)

    # make the directory tree for the patches within the base repo
    # pylint: disable=no-member
    patches_link.parent.mkdir(parents=True)

    # link the patchqueue directory for the base repo branch
    rel_path = relpath(str(status_path.parent), str(patches_link.parent))
    patches_link.symlink_to(rel_path)

    # create an empty status file
    with status_path.open('w'):
        pass

    patches = subprocess.check_output(['guilt', 'series'],
                                      cwd=base_repo.working_dir)
    if patches:
        subprocess.check_call(['guilt', 'push', '--all'],
                              cwd=base_repo.working_dir)
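
The pathlib idiom worth noting above is the relative symlink: symlink_to records whatever target string it is given, so the target is converted with os.path.relpath first. A minimal, self-contained sketch of that pattern (directory names below are made up, guilt is not involved, POSIX assumed for the symlink):

import tempfile
from os.path import relpath
from pathlib2 import Path  # plain `pathlib` on Python 3

root = Path(tempfile.mkdtemp())
target_dir = root / 'patchqueue' / 'master'            # directory we want to point at
link = root / 'repo' / '.git' / 'patches' / 'master'   # where the symlink will live

target_dir.mkdir(parents=True)
link.parent.mkdir(parents=True)

# store a relative target so the layout survives moving `root`
link.symlink_to(relpath(str(target_dir), str(link.parent)))
print(link.resolve())  # resolves back to target_dir
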
Example #2
def archive_resource(resource, destination):
    """
    Write an archive of a resource
    """
    archive_path = Path(destination, resource.basename)
    if resource.is_repo:
        temp_dir = tempfile.mkdtemp(prefix='clone-')
        try:
            repo = clone(resource.url, temp_dir, resource.commitish)
            logging.debug("Archiving %s@%s to %s", resource.url,
                          resource.commitish, archive_path)
            with archive_path.open("wb") as output:
                repo.archive(output, treeish=str(resource.commitish),
                             prefix=resource.prefix)
        finally:
            shutil.rmtree(temp_dir, ignore_errors=True)
    else:
        url = urlparse(resource.url)
        if url.scheme in SUPPORTED_URL_SCHEMES:
            logging.debug("Fetching %s to %s", resource.url, archive_path)
            fetch_url(url, str(archive_path), 5)
        elif url.scheme in ['', 'file'] and url.netloc == '':
            logging.debug("Copying %s to %s", url.path, archive_path)
            shutil.copyfile(url.path, str(archive_path))
    # else: UnsupportedScheme

    return archive_path
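
The dispatch on url.scheme is what decides between downloading and a local copy. A small sketch of just that decision, with a stand-in SUPPORTED_URL_SCHEMES (the real constant lives elsewhere in this project):

from urlparse import urlparse  # `from urllib.parse import urlparse` on Python 3

SUPPORTED_URL_SCHEMES = ('http', 'https', 'ftp')  # assumed value, for illustration only

def classify(resource_url):
    url = urlparse(resource_url)
    if url.scheme in SUPPORTED_URL_SCHEMES:
        return 'fetch'
    elif url.scheme in ('', 'file') and url.netloc == '':
        return 'copy'
    return 'unsupported'

print(classify('https://example.com/src.tar.gz'))  # fetch
print(classify('/local/path/src.tar.gz'))          # copy
print(classify('ssh://host/src.tar.gz'))           # unsupported
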
Example #3
def create_repo_from_spec(spec_path, top_path, repo_path):
    """
    Invoke the prep phase of rpmbuild to generate a source directory then
    create a git repo from it
    """
    top_dir = top_path.resolve()
    cmd = ['rpmbuild', '-bp', '--nodeps',
           '--define', '_topdir '+str(top_dir), str(spec_path)]
    logging.debug("Running %s", ' '.join(cmd))
    subprocess.check_call(cmd)

    # move the created build directory under the repo directory
    build_path = list(Path(top_path, 'BUILD').glob('*'))[0]
    rename(str(build_path), str(repo_path))

    git_dir = Path(repo_path, '.git')
    if git_dir.exists():
        # setup already created a git repo
        repo = git.Repo(str(repo_path))
    else:
        repo = git.Repo.init(str(repo_path))
        index = repo.index
        index.add(repo.untracked_files)
        index.commit("Repo generated by planex-clone")

    return repo
Example #4
    def process(self, path):
        path = Path(path)

        if path.is_dir():
            self.process_files_in(path)
        else:
            self.process_one_file(path)
Example #5
def dump_flight_to_kml(flight, kml_filename_local):
    """Dumps the flight to KML format.

    Args:
        flight: an igc_lib.Flight, the flight to be saved
        kml_filename_local: a string, the name of the output file
    """
    assert flight.valid
    kml = simplekml.Kml()

    def add_point(name, fix):
        kml.newpoint(name=name, coords=[(fix.lon, fix.lat)])

    coords = []
    for fix in flight.fixes:
        coords.append((fix.lon, fix.lat))
    kml.newlinestring(coords=coords)

    add_point(name="Takeoff", fix=flight.takeoff_fix)
    add_point(name="Landing", fix=flight.landing_fix)

    for i, thermal in enumerate(flight.thermals):
        add_point(name="thermal_%02d" % i, fix=thermal.enter_fix)
        add_point(name="thermal_%02d_END" % i, fix=thermal.exit_fix)

    # resolve the output path once, outside the loop (otherwise it is undefined
    # when the flight has no thermals)
    kml_filename = Path(kml_filename_local).expanduser().absolute()
    kml.save(kml_filename.as_posix())
Example #6
def find_files(paths):
    result = []
    basePath = Path('nuxeo-tools-hooks/nxtools/hooks')
    for path in [basePath.glob(path) for path in paths]:
        result += path

    return [str(path.relative_to(basePath)) for path in result
            if not path.relative_to(basePath).match('tests/**/*')]
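
The glob/relative_to/match combination above is easy to check in isolation; a throwaway sketch with a temporary tree (pathlib2, or plain pathlib on Python 3):

import tempfile
from pathlib2 import Path

base = Path(tempfile.mkdtemp())
(base / 'hooks').mkdir()
(base / 'tests' / 'unit').mkdir(parents=True)
(base / 'hooks' / 'a.py').touch()
(base / 'tests' / 'unit' / 'test_a.py').touch()

found = list(base.glob('**/*.py'))
kept = [str(p.relative_to(base)) for p in found
        if not p.relative_to(base).match('tests/**/*')]
print(kept)  # ['hooks/a.py'] -- the test file is filtered out
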
Example #7
    def __init__(self):
        self.defaults = {
            'configuration': None,
            'platforms': [],
        }
        self.xcode = None
        self.repo_overrides = dict()

        self.root_path = Path.cwd()  # type: Path

        self.library_directory = Path(os.path.expanduser('~/Library/io.schwa.Punic'))
        if not self.library_directory.exists():
            self.library_directory.mkdir(parents=True)
        self.repo_cache_directory = self.library_directory / 'repo_cache'
        if not self.repo_cache_directory.exists():
            self.repo_cache_directory.mkdir(parents=True)
        self.punic_path = self.root_path / 'Carthage'
        self.build_path = self.punic_path / 'Build'
        self.checkouts_path = self.punic_path / 'Checkouts'

        self.derived_data_path = self.library_directory / "DerivedData"

        runner.cache_path = self.library_directory / "cache.shelf"

        self.can_fetch = False
        self.xcode = Xcode.default()

        # Read in defaults from punic.yaml
        self.read(Path('punic.yaml'))
Example #8
class TermiusApp(App):
    """Class for CLI application."""

    def __init__(self):
        """Construct new CLI application."""
        super(TermiusApp, self).__init__(
            description='Termius app',
            version=__version__,
            command_manager=CommandManager('termius.handlers'),
        )
        self.configure_signals()
        self.directory_path = Path(expanduser('~/.{}/'.format(self.NAME)))
        if not self.directory_path.is_dir():
            self.directory_path.mkdir(parents=True)

    def configure_logging(self):
        """Change logging level for request package."""
        super(TermiusApp, self).configure_logging()
        logging.getLogger('requests').setLevel(logging.WARNING)
        return

    # pylint: disable=no-self-use
    def configure_signals(self):
        """Bind subscribers to signals."""
        post_create_instance.connect(store_ssh_key, sender=SshKey)
        post_update_instance.connect(store_ssh_key, sender=SshKey)
        post_delete_instance.connect(delete_ssh_key, sender=SshKey)

        post_logout.connect(clean_data)
Example #9
def main(src, dest):
    """links configfiles from one folder to another

    if links exists it verifies content
    if files exist at the target side it errors

    Args:
        src: source folder
        dest: target folder
    """
    src = Path(src)
    if not src.exists():
        print("WARNING:", src, "does not exist, skipping linking")
        return

    dest = Path(dest)

    for element in filter(_is_yaml_file, src.iterdir()):
        _warn_on_unknown_encryption(element)
        target = dest.joinpath(element.name)
        # the following is fragile
        if target.is_symlink():
            _warn_on_missmatching_symlink(src=element, target=target)
        elif target.is_file():
            _warn_on_existing_file(target)
        else:
            target.symlink_to(element.resolve())
Example #10
def load_legacy(filename):
    m = Path(filename)
    name = m.stem
    d = {}
    c = count()
    r = True

    def num(s):
        try:
            return int(s)
        except ValueError:
            return float(s)

    with m.open() as f:
        while r:
            next(c)
            r = re.search(r"([^\d\W]+)\s+(-*\d+\.*\d*)", f.readline())
            if r:
                d[r.groups()[0]] = num(r.groups()[1])
    l = next(c) - 1
    data = np.loadtxt(str(m.resolve()), skiprows=l)
    dataset = NpDataset(data, resolution=d["cellsize"])
    if "UTMzone" in d:
        gp = GeoPoint(UTM("UTMzone"), d["xllcorner"], d["yllcorner"])
    else:
        gp = GeoPoint(UTM(1), d["xllcorner"], d["yllcorner"])
    return GridMesh(gp, dataset)
Example #11
def new_page():
    from string import Template     # Use Python templates, not Mako templates

    slug = raw_input('Slug for page: ')
    title = raw_input('Title of page: ')
    template = raw_input('Template to inherit from (default is example.html): ')

    new_dir = Path('site') / slug
    if new_dir.exists():
        print '\nDirectory %s already exists, aborting' % new_dir
        return
    new_dir.mkdir()

    html_file = new_dir / 'index.html'
    with html_file.open('w') as fp:
        fp.write(Template(NEW_PAGE_HTML_TEMPLATE).substitute(
            title=repr(title.strip()), template=template.strip() or 'example.html'))

    js_file = new_dir / 'app.es6'
    with js_file.open('w') as fp:
        class_name = ''.join(s.capitalize() for s in title.split(' '))
        fp.write(Template(NEW_PAGE_JS_TEMPLATE).substitute(
            title=title, class_name=class_name))

    marker = '// This comment marks where new entry points will be added'
    new_entry = "'%s': './site/%s/app.es6'," % (slug, slug)
    code = open('webpack.config.js').read()
    with open('webpack.config.js', 'w') as fp:
        fp.write(code.replace(marker, new_entry + '\n    ' + marker))
Example #12
def dump_thermals_to_cup_file(flight, cup_filename_local):
    """Dump flight's thermals to a .cup file (SeeYou).

    Args:
        flight: an igc_lib.Flight, the flight to be written
        cup_filename_local: a string, the name of the file to be written.
    """
    cup_filename = Path(cup_filename_local).expanduser().absolute()
    with cup_filename.open('wt') as wpt:
        wpt.write(u'name,code,country,lat,')
        wpt.write(u'lon,elev,style,rwdir,rwlen,freq,desc,userdata,pics\n')

        def write_fix(name, fix):
            lat = _degrees_float_to_degrees_minutes_seconds(fix.lat, 'lat')
            lon = _degrees_float_to_degrees_minutes_seconds(fix.lon, 'lon')
            wpt.write(u'"%s",,,%02d%02d.%03d%s,' % (
                name, lat.degrees, lat.minutes,
                int(round(lat.seconds/60.0*1000.0)), lat.hemisphere))
            wpt.write(u'%03d%02d.%03d%s,%fm,,,,,,,' % (
                lon.degrees, lon.minutes,
                int(round(lon.seconds/60.0*1000.0)), lon.hemisphere,
                fix.gnss_alt))
            wpt.write(u'\n')

        for i, thermal in enumerate(flight.thermals):
            write_fix(u'%02d' % i, thermal.enter_fix)
            write_fix(u'%02d_END' % i, thermal.exit_fix)
Example #13
def gmx_mpi_linked(link):
    gmx_exe = distutils.spawn.find_executable('gmx')
    gmx_mpi = Path('~/gmx_mpi').expanduser()
    if not link:
        return ''
    else:
        gmx_mpi.symlink_to(gmx_exe)
        return str(gmx_mpi.expanduser())
Example #14
def vim_plug():
    vim_plug_path = Path(VIM_DIR).expand_user().join('autoload').make_dirs()
    vim_plug_path = vim_plug_path.join('plug.vim')
    LOG.info('downloading vim-plug')
    r = requests.get(VIM_PLUG_URL)
    with open(vim_plug_path.path, 'w') as f:
        f.write(r.content)
    LOG.info('done')
Example #15
def get_file(path):
    result = Path('web') / path
    if result.is_file():
        return str(result)
    if result.is_dir() and (result / 'index.html').is_file():
        return str(result / 'index.html')
    # File was not found.
    return None
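
A quick way to exercise this lookup is a temporary web-style tree; a hedged sketch (directory and file names are made up):

import tempfile
from pathlib2 import Path  # plain `pathlib` on Python 3

web = Path(tempfile.mkdtemp())
(web / 'docs').mkdir()
(web / 'docs' / 'index.html').write_text(u'<h1>hi</h1>')

def lookup(root, path):
    result = root / path
    if result.is_file():
        return str(result)
    if result.is_dir() and (result / 'index.html').is_file():
        return str(result / 'index.html')
    return None  # not found

print(lookup(web, 'docs'))             # falls back to .../docs/index.html
print(lookup(web, 'docs/index.html'))  # the file itself
print(lookup(web, 'missing'))          # None
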
Example #16
def pytest_unconfigure(config):
    if config_existed:
        config_backup.rename(str(path_config))
    else:
        os.remove(str(path_config))
    if config.option.link_gmx_mpi:
        gmx_mpi = Path('~/gmx_mpi').expanduser()
        gmx_mpi.unlink()
Example #17
    def generate_ssh_key_instance(self, path, storage):
        """Generate ssh key from file."""
        private_key_path = Path(path)
        instance = SshKey(
            private_key=private_key_path.read_text(),
            label=private_key_path.name
        )
        self.validate_ssh_key(instance, storage)
        return instance
Example #18
def datadir(original_datadir, tmpdir):
    # Method from: https://github.com/gabrielcnr/pytest-datadir
    # License: MIT
    import shutil
    result = Path(str(tmpdir.join(original_datadir.stem)))
    if original_datadir.is_dir():
        shutil.copytree(str(original_datadir), str(result))
    else:
        result.mkdir()
    return result
Example #19
def _get_data(request, data_type):
    data_dir = Path(DATA_PATH)
    result = None
    for file in data_dir.iterdir():
        parts = file.stem.split('__')
        test_name = request.node.name.split('test_')[-1]
        test_name = test_name[:test_name.index('[')] if '[' in test_name else test_name
        if parts[0] == data_type:
            if parts[1] == test_name or (parts[1] == 'default' and result is None):
                result = yaml.safe_load(file.read_text())
    return result
Example #20
def find_spec(package):
    """
    From a package name locate the spec file
    """
    spec_search = Configuration.get('spec', 'search-path',
                                    default='SPECS').split(':')
    for subdir in spec_search:
        path = Path(subdir, package+'.spec')
        if path.exists():
            return path
    return None
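
The same search-path idea works without the Configuration dependency: split the colon-separated list and return the first existing candidate (names below are illustrative):

from pathlib2 import Path

def find_in_search_path(search_path, filename):
    """search_path is a colon-separated string such as 'SPECS:other/SPECS'."""
    for subdir in search_path.split(':'):
        candidate = Path(subdir, filename)
        if candidate.exists():
            return candidate
    return None

print(find_in_search_path('SPECS:extra/SPECS', 'foo.spec'))  # None unless such a file exists
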
Example #21
def find_link_pin(package):
    """
    From a package name locate the link or pin file
    """
    pin_search = Configuration.get('pin', 'search-path',
                                   default='SPECS').split(':')
    for suffix in ('.pin', '.lnk'):
        for subdir in pin_search:
            path = Path(subdir, package+suffix)
            if path.exists():
                return path
    return None
Example #22
    def _local_url(path):
        """Copy a filepath into the static dir if required
        """
        path = Path(path).resolve()
        # if file is already below static in the hierarchy, don't do anything
        if static in path.parents:
            return path.relative_to(base)
        # otherwise copy the file into static
        static.mkdir(parents=True, exist_ok=True)
        local = static / path.name
        copyfile(str(path), str(local))  # only need str for py<3.6
        return str(local.relative_to(base))
Example #23
def check_text_files(obtained_fn, expected_fn, fix_callback=lambda x: x, encoding=None):
    """
    Compare two files contents. If the files differ, show the diff and write a nice HTML
    diff file into the data directory.
    :param Path obtained_fn: path to obtained file during current testing.
    :param Path expected_fn: path to the expected file, obtained from previous testing.
    :param str encoding: encoding used to open the files.
    :param callable fix_callback:
        A callback to "fix" the contents of the obtained (first) file.
        This callback receives a list of strings (lines) and must also return a list of lines,
        changed as needed.
        The resulting lines will be used to compare with the contents of expected_fn.
    """
    __tracebackhide__ = True

    obtained_fn = Path(obtained_fn)
    expected_fn = Path(expected_fn)
    obtained_lines = fix_callback(obtained_fn.read_text(encoding=encoding).splitlines())
    expected_lines = expected_fn.read_text(encoding=encoding).splitlines()

    if obtained_lines != expected_lines:
        diff_lines = list(difflib.unified_diff(expected_lines, obtained_lines))
        if len(diff_lines) <= 500:
            html_fn = obtained_fn.with_suffix(".diff.html")
            try:
                differ = difflib.HtmlDiff()
                html_diff = differ.make_file(
                    fromlines=expected_lines,
                    fromdesc=expected_fn,
                    tolines=obtained_lines,
                    todesc=obtained_fn,
                )
            except Exception as e:
                html_fn = "(failed to generate html diff: %s)" % e
            else:
                html_fn.write_text(html_diff, encoding="UTF-8")

            diff = ["FILES DIFFER:", str(expected_fn), str(obtained_fn)]
            diff += ["HTML DIFF: %s" % html_fn]
            diff += diff_lines
            raise AssertionError("\n".join(diff))
        else:
            # difflib has exponential scaling and for thousands of lines it starts to take minutes to render
            # the HTML diff.
            msg = [
                "Files are different, but diff is too big (%s lines)" % (len(diff_lines),),
                "- obtained: %s" % (obtained_fn,),
                "- expected: %s" % (expected_fn,),
            ]
            raise AssertionError("\n".join(msg))
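
The heavy lifting above is plain difflib; the HTML report is produced the same way with difflib.HtmlDiff().make_file. A tiny standalone sketch of the unified-diff part (temporary files, contents invented):

import difflib
import tempfile
from pathlib2 import Path  # plain `pathlib` on Python 3

work = Path(tempfile.mkdtemp())
obtained_fn = work / 'obtained.txt'
expected_fn = work / 'expected.txt'
obtained_fn.write_text(u'one\ntwo\nthree\n')
expected_fn.write_text(u'one\n2\nthree\n')

obtained_lines = obtained_fn.read_text().splitlines()
expected_lines = expected_fn.read_text().splitlines()
diff_lines = list(difflib.unified_diff(expected_lines, obtained_lines,
                                       fromfile=str(expected_fn), tofile=str(obtained_fn)))
print('\n'.join(diff_lines))
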
Example #24
def cmd_run(path):
    """
    Runs an application.
    """
    os.chdir(path)
    package = Path("./package.json")
    if not package.is_file():
        raise Exception("Invalid package: no package.json file")

    package = json.load(package.open())

    if "engines" not in package or package["engines"] == {}:
        raise Exception("Invalid package: no engines specified")

    r = requests.get("%s/index.json" % Particle.REPO)
    r.raise_for_status()
    remote_particles = r.json()["particles"]

    variables = {}
    for name, range_ in package["engines"].items():
        p = Particle.get_local(name, range_)
        if not p:
            # if auto_fetch:
            if name in remote_particles:
                v = semver.max_satisfying(remote_particles[name], range_, False)
                if v:
                    print("Downloading %s %s..." % (name, v))
                    p = Particle.fetch(name, v)
                else:
                    print("Cannot satisfy %s (%s), aborting." % (name, range_))
                    sys.exit(1)
            else:
                print("No particle named %s exists, aborting." % name)
                sys.exit(1)
        variables["$" + name.upper().replace("-", "_")] = str(p.main)

    pattern = re.compile('|'.join(map(re.escape, variables.keys())))

    if "lepton" not in package:
        raise Exception("Invalid package: no lepton key in particle.json")
    elif "run" not in package["lepton"]:
        raise Exception("Invalid package: no lepton.run key in particle.json")

    args = package["lepton"]["run"]
    args = pattern.sub(lambda x: variables[x.group()], args)
    args = shlex.split(args)
    print("Resulting command line: %r" % args)
    print("Current dir: %s" % os.getcwd())
    os.execvp(args[0], args)
Example #25
    def check(self, path_pairs):
        result = RepoCheckResult()
        for repo_path, system_path in path_pairs:
            LOG.debug('checking "{}" <-> "{}"...'.format(repo_path, system_path))
            repo = Path(self.repo_path).joinpath(repo_path)
            system = Path(system_path)
            pair = RepoPathPair(repo, system)
            repo = repo.expanduser()
            system = system.expanduser()
            pair.update(repo, system)
            status = diffcheck(repo, system)
            LOG.debug('done, status: {}'.format(status))
            pair.status = status
            result.add_pair(pair)
        return result
Example #26
def data_path(relative_path, relative_to=None):
    """Returns data path to test file."""

    if relative_to is None:
        # Use BASE_DIR as default.
        relative_to = BASE_DIR
    elif not isinstance(relative_to, Path):
        # Ensure relative_to is a Path.
        relative_to = Path(relative_to)

    # If relative_to is not a path, move up one level.
    if not relative_to.is_dir():
        relative_to = relative_to.parent

    return relative_to / 'data' / relative_path
Example #27
    def del_oldest_tile(self):
        """ Deletes the oldest tile from the cache. """
        arr = self._get_cache_arr()
        oldestAddr = None
        oldestTs = Inf
        for k, v in arr.iteritems():
            if v.get('ts', Inf) < oldestTs:
                oldestTs = v.get('ts', Inf)
                oldestAddr = k

        if oldestAddr is not None:
            path = arr[oldestAddr].get('path', None)
            if path is None:
                raise IOError('Invalid Path!')
            Path(path).unlink()
        else:
            raise IOError('No tiles to delete!')
Example #28
def test_update_and_build():
    if quick_tests_only:
        return

    source = Path(__file__).parent / 'Examples'
    destination = Path(tempfile.mkdtemp()) / 'Examples'

    shutil.copytree(source, destination)

    project_paths = [path for path in destination.iterdir() if path.is_dir()]

    for project_path in project_paths:

        with work_directory(project_path):

            output = runner.check_run('punic update')
Example #29
    def __init__(self, path, remote_url=None, branch_name='master'):
        self.path = Path(path)
        self.path_str = str(self.path)
        self.remote_url = remote_url
        self.branch_name = branch_name
        db_latest_key = '%s:%s:%s' % (self.path_str, remote_url or '',
                                      branch_name)
        self.db_latest_key = sha256(db_latest_key).hexdigest()
Example #30
File: git.py  Project: pmac/bedrock
    def __init__(self, path, remote_url=None, remote_name=None, branch_name='master'):
        self.path = Path(path)
        self.path_str = str(self.path)
        self.remote_url = remote_url
        self.branch_name = branch_name
        if not remote_name:
            remote_name = 'bedrock-dev' if settings.DEV else 'bedrock-prod'

        self.remote_name = remote_name
Example #31
def _save_XDG_path(xdg_dir, *dirname):
    subdir = Path(xdg_dir).joinpath(*dirname)
    subdir.mkdir(parents=True, exist_ok=True)
    return subdir
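
mkdir(parents=True, exist_ok=True) is what makes this helper safe to call repeatedly; the bare mkdir(parents=True) used in some earlier examples raises if the directory already exists. A quick sketch:

import tempfile
from pathlib2 import Path

base = Path(tempfile.mkdtemp())
subdir = Path(base).joinpath('cache', 'thumbnails')
subdir.mkdir(parents=True, exist_ok=True)
subdir.mkdir(parents=True, exist_ok=True)  # second call is a harmless no-op
print(subdir.is_dir())  # True
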
Example #32
def run(data_path,
        image_size=160,
        epochs=10,
        batch_size=32,
        learning_rate=0.0001,
        output='model',
        dataset=None):
    img_shape = (image_size, image_size, 3)

    info('Loading Data Set')
    # load dataset
    train, test, val, labels = load_dataset(data_path, dataset)

    # training data
    train_data, train_labels = zip(*train)
    train_ds = Dataset.zip((Dataset.from_tensor_slices(list(train_data)),
                            Dataset.from_tensor_slices(list(train_labels))))

    train_ds = train_ds.map(map_func=process_image, num_parallel_calls=5)

    train_ds = train_ds.apply(tf.data.experimental.ignore_errors())

    train_ds = train_ds.batch(batch_size)
    train_ds = train_ds.prefetch(buffer_size=5)
    train_ds = train_ds.repeat()

    # model
    info('Creating Model')
    base_model = tf.keras.applications.MobileNetV2(input_shape=img_shape,
                                                   include_top=False,
                                                   weights='imagenet')
    base_model.trainable = True

    model = tf.keras.Sequential([
        base_model,
        tf.keras.layers.GlobalAveragePooling2D(),
        tf.keras.layers.Dense(1, activation='sigmoid')
    ])

    model.compile(optimizer=tf.keras.optimizers.Adam(lr=learning_rate),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])

    model.summary()

    # training
    info('Training')
    steps_per_epoch = math.ceil(len(train) / batch_size)
    history = model.fit(train_ds,
                        epochs=epochs,
                        steps_per_epoch=steps_per_epoch)

    # save model
    info('Saving Model')

    # check existence of base model folder
    output = check_dir(output)

    print('Serializing into saved_model format')
    tf.saved_model.save(model, str(output))
    print('Done!')

    # add time prefix folder
    file_output = str(Path(output).joinpath('latest.h5'))
    print('Serializing h5 model to:\n{}'.format(file_output))
    model.save(file_output)
Example #33
from pathlib2 import Path
from . import muscodSSH as ssh
tryWarmStart = True

print("run bench with feasibility criterion")

com = fullBody.getCenterOfMass()
success = com[0] > 1.25

numConf = len(configs)
if success:
    # muscod without warm start :
    if Path(CONTACT_SEQUENCE_WHOLEBODY_FILE).is_file():
        os.remove(CONTACT_SEQUENCE_WHOLEBODY_FILE)
    filename_xml = OUTPUT_DIR + "/" + OUTPUT_SEQUENCE_FILE
    mp.generate_muscod_problem(filename_xml, True)
    successMuscod, ssh_ok = ssh.call_muscod()
    time.sleep(5.)  # wait for sync of the ~/home (worst case, usually 0.1 is enough ...)
    muscodConverged = successMuscod and Path(
        CONTACT_SEQUENCE_WHOLEBODY_FILE).is_file()

    if tryWarmStart:
        if not success:
            # generate warm start from planning :
            if Path(CONTACT_SEQUENCE_WHOLEBODY_FILE).is_file():
                os.remove(CONTACT_SEQUENCE_WHOLEBODY_FILE)
Example #34
import json
from pathlib2 import Path

with open(str(Path(__file__).parent / Path('typedef.json')), 'r') as f:
    TYPEDEF = json.load(f)

with open(str(Path(__file__).parent / Path('feerate.json')), 'r') as f:
    FEERATE = json.load(f)

API_URL = 'http://47.75.57.213:3001/api'
Example #35
FREQUENCY_666 = 666
LEVEL_OFFSET_666 = find_level_offset_by_frequency(
    "DVBC_FREQUENCY_LEVEL_OFFSET", 666)
LEVEL_50_666 = str("%.2f" % (-50 - LEVEL_OFFSET_666))

PARAMETER_LIST = [[
    MODULATION_64QAM, FREQUENCY_666, LEVEL_OFFSET_666, LEVEL_50_666, -50, 27
], [MODULATION_256QAM, FREQUENCY_666, LEVEL_OFFSET_666, LEVEL_50_666, -50, 35]]

SYMBOL_RATE_LIST = [
    SYMBOL_RATE_6952, SYMBOL_RATE_6875, SYMBOL_RATE_6000, SYMBOL_RATE_5000,
    SYMBOL_RATE_4000
]

my_file = Path("../../ekt_json/dvbc_4_symbol_rate.json")
if not my_file.exists():
    dict_test_parame_result = {}
    list_test_parame_result = []

    for PARAMETER in PARAMETER_LIST:
        list_test_result = []
        for SYMBOL_RATE in SYMBOL_RATE_LIST:
            list_test_result.append([SYMBOL_RATE, None])
        list_test_parame_result.append([
            PARAMETER[0], PARAMETER[1], PARAMETER[2], PARAMETER[3],
            PARAMETER[4], PARAMETER[5], list_test_result
        ])
    dict_test_parame_result["test_parame_result"] = list_test_parame_result
Example #36
    def get_stderr_path_with_postfix(self, postfix):
        return Path("{}_{}".format(
            self.__syslog_ng_paths["file_paths"]["stderr"], postfix))
Example #37
def _default_settings():
    # type: () -> cmk.ec.settings.Settings
    """Returns default EC settings. This function should vanish in the long run!"""
    return cmk.ec.settings.settings('', Path(cmk.utils.paths.omd_root),
                                    Path(cmk.utils.paths.default_config_dir),
                                    [''])
Example #38
from pathlib2 import Path

dac_dir = Path(__file__).parent
trajector_dir = dac_dir / 'trajs'
project_dir = dac_dir.parent.parent
# results_dir = project_dir / 'results'

results_dir = dac_dir / 'results'

trained_model_dir = dac_dir / 'trained_model'

trained_model_dir_rela = './trained_model'

if not trajector_dir.is_dir():
    trajector_dir.mkdir()

if not results_dir.is_dir():
    results_dir.mkdir()

if not trained_model_dir.is_dir():
    trained_model_dir.mkdir()
Example #39
def interesting(cli_args, temp_prefix):
    """Interesting if the binary crashes with a possibly-desired signature on the stack.

    Args:
        cli_args (list): List of input arguments.
        temp_prefix (str): Temporary directory prefix, e.g. tmp1/1 or tmp4/1

    Returns:
        bool: True if the intended signature shows up on the stack, False otherwise.
    """
    parser = argparse.ArgumentParser(
        prog="crashesat",
        usage=(
            re.search("python.*[2-3]", os.__file__).group(0).replace("/", "") +
            " -m lithium %(prog)s [options] binary [flags] testcase.ext"))
    parser.add_argument(
        "-r",
        "--regex",
        action="store_true",
        default=False,
        help="Allow search for regular expressions instead of strings.")
    parser.add_argument(
        "-s",
        "--sig",
        default="",
        type=str,
        help="Match this crash signature. Defaults to '%default'.")
    parser.add_argument(
        "-t",
        "--timeout",
        default=120,
        type=int,
        help="Optionally set the timeout. Defaults to '%default' seconds.")
    parser.add_argument("cmd_with_flags", nargs=argparse.REMAINDER)
    args = parser.parse_args(cli_args)

    log = logging.getLogger(__name__)

    # Examine stack for crash signature, this is needed if args.sig is specified.
    runinfo = timed_run.timed_run(args.cmd_with_flags, args.timeout,
                                  temp_prefix)
    if runinfo.sta == timed_run.CRASHED:
        os_ops.grab_crash_log(args.cmd_with_flags[0], runinfo.pid, temp_prefix,
                              True)

    crash_log = Path(temp_prefix + "-crash.txt")
    time_str = " (%.3f seconds)" % runinfo.elapsedtime

    if runinfo.sta == timed_run.CRASHED:
        if crash_log.resolve().is_file():  # pylint: disable=no-member
            # When using this script, remember to escape characters, e.g. "\(" instead of "(" !
            if file_contains(str(crash_log), args.sig, args.regex)[0]:
                log.info("Exit status: %s%s", runinfo.msg, time_str)
                return True
            log.info("[Uninteresting] It crashed somewhere else!%s", time_str)
            return False
        log.info(
            "[Uninteresting] It appeared to crash, but no crash log was found?%s",
            time_str)
        return False
    log.info("[Uninteresting] It didn't crash.%s", time_str)
    return False
Example #40
import os
from pathlib2 import Path

__all__ = [
    'CONFIG_FILE', 'DIR_LONG', 'DIR_SMALL', 'DIR_MEDIUM', 'DNS_LONG',
    'DNS_SMALL', 'DNS_MEDUIM', 'DB_FILE'
]

SRC_PATH = Path.joinpath(Path(os.path.abspath(os.path.dirname(__file__))),
                         "../")

# config
CONFIG_FILE = Path.joinpath(SRC_PATH, "../config.ini")

# Dirs bruteforce
DIR_LONG = Path.joinpath(SRC_PATH, "wordlist/long_dir.txt")
DIR_SMALL = Path.joinpath(SRC_PATH, "wordlist/small_dir.txt")
DIR_MEDIUM = Path.joinpath(SRC_PATH, "wordlist/medium_dir.txt")

# DNS bruteforce
DNS_LONG = Path.joinpath(SRC_PATH, "wordlist/long_dns.txt")
DNS_SMALL = Path.joinpath(SRC_PATH, "wordlist/small_dns.txt")
DNS_MEDUIM = Path.joinpath(SRC_PATH, "wordlist/medium_dns.txt")

# Databases
DB_FOLDER = Path.joinpath(SRC_PATH, "db/")
DB_FILE = Path.joinpath(DB_FOLDER, "scan.db")
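
Calling Path.joinpath through the class works, but the / operator reads more naturally for constants like these; an equivalent sketch of the same layout (same relative paths, purely illustrative):

import os
from pathlib2 import Path

SRC_PATH = Path(os.path.abspath(os.path.dirname(__file__))) / '..'

CONFIG_FILE = SRC_PATH / '..' / 'config.ini'
DIR_LONG = SRC_PATH / 'wordlist' / 'long_dir.txt'
DB_FILE = SRC_PATH / 'db' / 'scan.db'
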
Example #41
"""
TRAINS - Artificial Intelligence Version Control
https://github.com/allegroai/trains
"""

# Always prefer setuptools over distutils
from setuptools import setup, find_packages
from six import exec_
from pathlib2 import Path

here = Path(__file__).resolve().parent

# Get the long description from the README file
long_description = (here / 'README.md').read_text()


def read_version_string():
    result = {}
    exec_((here / 'trains/version.py').read_text(), result)
    return result['__version__']


version = read_version_string()

requirements = (here / 'requirements.txt').read_text().splitlines()

setup(
    name='trains',
    version=version,
    description=
    'TRAINS - Auto-Magical Experiment Manager & Version Control for AI',
Example #42
    def create_task(self, dry_run=False):
        # type: (bool) -> Union[Task, Dict]
        """
        Create the new populated Task

        :param dry_run: Optional, If True do not create an actual Task, instead return the Task definition as dict
        :return: newly created Task object
        """
        local_entry_file = None
        repo_info = None
        if self.folder or (self.script and Path(self.script).is_file()
                           and not self.repo):
            self.folder = os.path.expandvars(os.path.expanduser(
                self.folder)) if self.folder else None
            self.script = os.path.expandvars(os.path.expanduser(
                self.script)) if self.script else None
            self.cwd = os.path.expandvars(os.path.expanduser(
                self.cwd)) if self.cwd else None
            if Path(self.script).is_file():
                entry_point = self.script
            else:
                entry_point = (Path(self.folder) / self.script).as_posix()
            entry_point = os.path.abspath(entry_point)
            if not os.path.isfile(entry_point):
                raise ValueError(
                    "Script entrypoint file \'{}\' could not be found".format(
                        entry_point))

            local_entry_file = entry_point
            repo_info, requirements = ScriptInfo.get(
                filepaths=[entry_point],
                log=getLogger(),
                create_requirements=self.packages is True,
                uncommitted_from_remote=True,
                detect_jupyter_notebook=False,
                add_missing_installed_packages=True,
                detailed_req_report=False,
            )

        # if we have no repository and no requirements, raise an error
        if self.raise_on_missing_entries and (not self.requirements_file and not self.packages) \
                and not self.repo and (
                not repo_info or not repo_info.script or not repo_info.script.get('repository')):
            raise ValueError(
                "Standalone script detected \'{}\', but no requirements provided"
                .format(self.script))

        if dry_run:
            task = None
            task_state = dict(
                name=self.task_name,
                project=Task.get_project_id(self.project_name),
                type=str(self.task_type or Task.TaskTypes.training),
            )  # type: dict
            if self.output_uri:
                task_state['output'] = dict(destination=self.output_uri)
        else:
            task_state = dict(script={})

            if self.base_task_id:
                if self.verbose:
                    print('Cloning task {}'.format(self.base_task_id))
                task = Task.clone(source_task=self.base_task_id,
                                  project=Task.get_project_id(
                                      self.project_name))

                self._set_output_uri(task)
            else:
                # noinspection PyProtectedMember
                task = Task._create(task_name=self.task_name,
                                    project_name=self.project_name,
                                    task_type=self.task_type
                                    or Task.TaskTypes.training)

                self._set_output_uri(task)

                # if there is nothing to populate, return
                if not any([
                        self.folder, self.commit, self.branch, self.repo,
                        self.script, self.cwd, self.packages,
                        self.requirements_file, self.base_task_id
                ] + (list(self.docker.values()))):
                    return task

        # clear the script section
        task_state['script'] = {}

        if repo_info:
            task_state['script']['repository'] = repo_info.script['repository']
            task_state['script']['version_num'] = repo_info.script[
                'version_num']
            task_state['script']['branch'] = repo_info.script['branch']
            task_state['script']['diff'] = repo_info.script['diff'] or ''
            task_state['script']['working_dir'] = repo_info.script[
                'working_dir']
            task_state['script']['entry_point'] = repo_info.script[
                'entry_point']
            task_state['script']['binary'] = repo_info.script['binary']
            task_state['script']['requirements'] = repo_info.script.get(
                'requirements') or {}
            if self.cwd:
                self.cwd = self.cwd
                cwd = self.cwd if Path(self.cwd).is_dir() else (
                    Path(repo_info.script['repo_root']) / self.cwd).as_posix()
                if not Path(cwd).is_dir():
                    raise ValueError(
                        "Working directory \'{}\' could not be found".format(
                            cwd))
                cwd = Path(cwd).relative_to(
                    repo_info.script['repo_root']).as_posix()
                entry_point = \
                    Path(repo_info.script['repo_root']) / repo_info.script['working_dir'] / repo_info.script[
                        'entry_point']
                entry_point = entry_point.relative_to(cwd).as_posix()
                task_state['script']['entry_point'] = entry_point or ""
                task_state['script']['working_dir'] = cwd or "."
        elif self.repo:
            # normalize backslashes and remove first one
            entry_point = '/'.join(
                [p for p in self.script.split('/') if p and p != '.'])
            cwd = '/'.join(
                [p for p in (self.cwd or '.').split('/') if p and p != '.'])
            if cwd and entry_point.startswith(cwd + '/'):
                entry_point = entry_point[len(cwd) + 1:]
            task_state['script']['repository'] = self.repo
            task_state['script']['version_num'] = self.commit or None
            task_state['script']['branch'] = self.branch or None
            task_state['script']['diff'] = ''
            task_state['script']['working_dir'] = cwd or '.'
            task_state['script']['entry_point'] = entry_point or ""
        else:
            # standalone task
            task_state['script']['entry_point'] = self.script or ""
            task_state['script']['working_dir'] = '.'

        # update requirements
        reqs = []
        if self.requirements_file:
            with open(self.requirements_file.as_posix(), 'rt') as f:
                reqs = [line.strip() for line in f.readlines()]
        if self.packages and self.packages is not True:
            reqs += self.packages
        if reqs:
            # make sure we have clearml.
            clearml_found = False
            for line in reqs:
                if line.strip().startswith('#'):
                    continue
                package = reduce(lambda a, b: a.split(b)[0], "#;@=~<>",
                                 line).strip()
                if package == 'clearml':
                    clearml_found = True
                    break
            if not clearml_found:
                reqs.append('clearml')
            task_state['script']['requirements'] = {'pip': '\n'.join(reqs)}
        elif not self.repo and repo_info and not repo_info.script.get(
                'requirements'):
            # we are in local mode, make sure we have a "requirements.txt" (it is required)
            reqs_txt_file = Path(
                repo_info.script['repo_root']) / "requirements.txt"
            poetry_toml_file = Path(
                repo_info.script['repo_root']) / "pyproject.toml"
            if self.raise_on_missing_entries and not reqs_txt_file.is_file(
            ) and not poetry_toml_file.is_file():
                raise ValueError("requirements.txt not found [{}] "
                                 "Use --requirements or --packages".format(
                                     reqs_txt_file.as_posix()))

        if self.add_task_init_call:
            script_entry = ('/' +
                            task_state['script'].get('working_dir', '.') +
                            '/' + task_state['script']['entry_point'])
            if platform == "win32":
                script_entry = os.path.normpath(script_entry).replace(
                    '\\', '/')
            else:
                script_entry = os.path.abspath(script_entry)
            idx_a = 0
            lines = None
            # find the right entry for the patch if we have a local file (basically after __future__ imports)
            if local_entry_file:
                with open(local_entry_file, 'rt') as f:
                    lines = f.readlines()
                future_found = self._locate_future_import(lines)
                if future_found >= 0:
                    idx_a = future_found + 1

            task_init_patch = ''
            if self.repo or task_state.get('script', {}).get('repository'):
                # if we do not have requirements, add clearml to the requirements.txt
                if not reqs:
                    task_init_patch += \
                        "diff --git a/requirements.txt b/requirements.txt\n" \
                        "--- a/requirements.txt\n" \
                        "+++ b/requirements.txt\n" \
                        "@@ -0,0 +1,1 @@\n" \
                        "+clearml\n"

                # Add Task.init call
                task_init_patch += \
                    "diff --git a{script_entry} b{script_entry}\n" \
                    "--- a{script_entry}\n" \
                    "+++ b{script_entry}\n" \
                    "@@ -{idx_a},0 +{idx_b},3 @@\n" \
                    "+from clearml import Task\n" \
                    "+Task.init()\n" \
                    "+\n".format(
                        script_entry=script_entry, idx_a=idx_a, idx_b=idx_a + 1)
            elif local_entry_file and lines:
                # if we are here it means we do not have a git diff, but a single script file
                init_lines = ["from clearml import Task\n", "Task.init()\n\n"]
                task_state['script']['diff'] = ''.join(lines[:idx_a] +
                                                       init_lines +
                                                       lines[idx_a:])
                # no need to add anything, we patched it.
                task_init_patch = ""
            else:
                # Add Task.init call
                task_init_patch += \
                    "from clearml import Task\n" \
                    "Task.init()\n\n"

            # make sure we add the diff at the end of the current diff
            task_state['script']['diff'] = task_state['script'].get('diff', '')
            if task_state['script']['diff'] and not task_state['script'][
                    'diff'].endswith('\n'):
                task_state['script']['diff'] += '\n'
            task_state['script']['diff'] += task_init_patch

        # set base docker image if provided
        if self.docker:
            if dry_run:
                task_state['container'] = dict(
                    image=self.docker.get('image') or '',
                    arguments=self.docker.get('args') or '',
                    setup_shell_script=self.docker.get('bash_script') or '',
                )
            else:
                task.set_base_docker(
                    docker_image=self.docker.get('image'),
                    docker_arguments=self.docker.get('args'),
                    docker_setup_bash_script=self.docker.get('bash_script'),
                )

        if self.verbose:
            if task_state['script']['repository']:
                repo_details = {
                    k: v
                    for k, v in task_state['script'].items()
                    if v and k not in ('diff', 'requirements', 'binary')
                }
                print('Repository Detected\n{}'.format(
                    json.dumps(repo_details, indent=2)))
            else:
                print('Standalone script detected\n  Script: {}'.format(
                    self.script))

            if task_state['script'].get('requirements') and \
                    task_state['script']['requirements'].get('pip'):
                print('Requirements:{}{}'.format(
                    '\n  Using requirements.txt: {}'.format(
                        self.requirements_file.as_posix())
                    if self.requirements_file else '',
                    '\n  {}Packages: {}'.format(
                        'Additional ' if self.requirements_file else '',
                        self.packages) if self.packages else ''))
            if self.docker:
                print('Base docker image: {}'.format(self.docker))

        if dry_run:
            return task_state

        # update the Task
        task.update_task(task_state)
        self.task = task
        return task
Example #43
class CreateAndPopulate(object):
    _VCS_SSH_REGEX = \
        "^" \
        "(?:(?P<user>{regular}*?)@)?" \
        "(?P<host>{regular}*?)" \
        ":" \
        "(?P<path>{regular}.*)?" \
        "$" \
        .format(
            regular=r"[^/@:#]"
        )

    def __init__(
            self,
            project_name=None,  # type: Optional[str]
            task_name=None,  # type: Optional[str]
            task_type=None,  # type: Optional[str]
            repo=None,  # type: Optional[str]
            branch=None,  # type: Optional[str]
            commit=None,  # type: Optional[str]
            script=None,  # type: Optional[str]
            working_directory=None,  # type: Optional[str]
            packages=None,  # type: Optional[Union[bool, Sequence[str]]]
            requirements_file=None,  # type: Optional[Union[str, Path]]
            docker=None,  # type: Optional[str]
            docker_args=None,  # type: Optional[str]
            docker_bash_setup_script=None,  # type: Optional[str]
            output_uri=None,  # type: Optional[str]
            base_task_id=None,  # type: Optional[str]
            add_task_init_call=True,  # type: bool
            raise_on_missing_entries=False,  # type: bool
            verbose=False,  # type: bool
    ):
        # type: (...) -> None
        """
        Create a new Task from an existing code base.
        If the code does not already contain a call to Task.init, pass add_task_init_call=True,
        and the code will be patched in remote execution (i.e. when executed by `clearml-agent`).

        :param project_name: Set the project name for the task. Required if base_task_id is None.
        :param task_name: Set the name of the remote task. Required if base_task_id is None.
        :param task_type: Optional, The task type to be created. Supported values: 'training', 'testing', 'inference',
            'data_processing', 'application', 'monitor', 'controller', 'optimizer', 'service', 'qc', 'custom'
        :param repo: Remote URL for the repository to use, OR path to local copy of the git repository
            Example: 'https://github.com/allegroai/clearml.git' or '~/project/repo'
        :param branch: Select specific repository branch/tag (implies the latest commit from the branch)
        :param commit: Select specific commit id to use (default: latest commit,
            or when used with local repository matching the local commit id)
        :param script: Specify the entry point script for the remote execution. When used in tandem with
            remote git repository the script should be a relative path inside the repository,
            for example: './source/train.py' . When used with local repository path it supports a
            direct path to a file inside the local repository itself, for example: '~/project/source/train.py'
        :param working_directory: Working directory to launch the script from. Default: repository root folder.
            Relative to repo root or local folder.
        :param packages: Manually specify a list of required packages. Example: ["tqdm>=2.1", "scikit-learn"]
            or `True` to automatically create requirements
            based on locally installed packages (repository must be local).
        :param requirements_file: Specify requirements.txt file to install when setting the session.
            If not provided, the requirements.txt from the repository will be used.
        :param docker: Select the docker image to be executed in by the remote session
        :param docker_args: Add docker arguments, pass a single string
        :param docker_bash_setup_script: Add bash script to be executed
            inside the docker before setting up the Task's environment
        :param output_uri: Optional, set the Tasks's output_uri (Storage destination).
            examples: 's3://bucket/folder', 'https://server/' , 'gs://bucket/folder', 'azure://bucket', '/folder/'
        :param base_task_id: Use a pre-existing task in the system, instead of a local repo/script.
            Essentially clones an existing task and overrides arguments/requirements.
        :param add_task_init_call: If True, a 'Task.init()' call is added to the script entry point in remote execution.
        :param raise_on_missing_entries: If True raise ValueError on missing entries when populating
        :param verbose: If True print verbose logging
        """
        if repo and len(urlparse(repo).scheme) <= 1 and not re.compile(
                self._VCS_SSH_REGEX).match(repo):
            folder = repo
            repo = None
        else:
            folder = None

        if raise_on_missing_entries and not base_task_id:
            if not script:
                raise ValueError("Entry point script not provided")
            if not repo and not folder and not Path(script).is_file():
                raise ValueError(
                    "Script file \'{}\' could not be found".format(script))
        if raise_on_missing_entries and commit and branch:
            raise ValueError(
                "Specify either a branch/tag or specific commit id, not both (either --commit or --branch)"
            )
        if raise_on_missing_entries and not folder and working_directory and working_directory.startswith(
                '/'):
            raise ValueError(
                "working directory \'{}\' must be relative to repository root".format(
                    working_directory))

        if requirements_file and not Path(requirements_file).is_file():
            raise ValueError("requirements file could not be found \'{}\'")

        self.folder = folder
        self.commit = commit
        self.branch = branch
        self.repo = repo
        self.script = script
        self.cwd = working_directory
        assert not packages or isinstance(packages, (tuple, list, bool))
        self.packages = list(packages) if packages is not None and not isinstance(packages, bool) \
            else (packages or None)
        self.requirements_file = Path(
            requirements_file) if requirements_file else None
        self.base_task_id = base_task_id
        self.docker = dict(image=docker,
                           args=docker_args,
                           bash_script=docker_bash_setup_script)
        self.add_task_init_call = add_task_init_call
        self.project_name = project_name
        self.task_name = task_name
        self.task_type = task_type
        self.output_uri = output_uri
        self.task = None
        self.raise_on_missing_entries = raise_on_missing_entries
        self.verbose = verbose

    def create_task(self, dry_run=False):
        # type: (bool) -> Union[Task, Dict]
        """
        Create the new populated Task

        :param dry_run: Optional, If True do not create an actual Task, instead return the Task definition as dict
        :return: newly created Task object
        """
        local_entry_file = None
        repo_info = None
        if self.folder or (self.script and Path(self.script).is_file()
                           and not self.repo):
            self.folder = os.path.expandvars(os.path.expanduser(
                self.folder)) if self.folder else None
            self.script = os.path.expandvars(os.path.expanduser(
                self.script)) if self.script else None
            self.cwd = os.path.expandvars(os.path.expanduser(
                self.cwd)) if self.cwd else None
            if Path(self.script).is_file():
                entry_point = self.script
            else:
                entry_point = (Path(self.folder) / self.script).as_posix()
            entry_point = os.path.abspath(entry_point)
            if not os.path.isfile(entry_point):
                raise ValueError(
                    "Script entrypoint file \'{}\' could not be found".format(
                        entry_point))

            local_entry_file = entry_point
            repo_info, requirements = ScriptInfo.get(
                filepaths=[entry_point],
                log=getLogger(),
                create_requirements=self.packages is True,
                uncommitted_from_remote=True,
                detect_jupyter_notebook=False,
                add_missing_installed_packages=True,
                detailed_req_report=False,
            )

        # if we have no repository and no requirements, raise an error
        if self.raise_on_missing_entries and (not self.requirements_file and not self.packages) \
                and not self.repo and (
                not repo_info or not repo_info.script or not repo_info.script.get('repository')):
            raise ValueError(
                "Standalone script detected \'{}\', but no requirements provided"
                .format(self.script))

        if dry_run:
            task = None
            task_state = dict(
                name=self.task_name,
                project=Task.get_project_id(self.project_name),
                type=str(self.task_type or Task.TaskTypes.training),
            )  # type: dict
            if self.output_uri:
                task_state['output'] = dict(destination=self.output_uri)
        else:
            task_state = dict(script={})

            if self.base_task_id:
                if self.verbose:
                    print('Cloning task {}'.format(self.base_task_id))
                task = Task.clone(source_task=self.base_task_id,
                                  project=Task.get_project_id(
                                      self.project_name))

                self._set_output_uri(task)
            else:
                # noinspection PyProtectedMember
                task = Task._create(task_name=self.task_name,
                                    project_name=self.project_name,
                                    task_type=self.task_type
                                    or Task.TaskTypes.training)

                self._set_output_uri(task)

                # if there is nothing to populate, return
                if not any([
                        self.folder, self.commit, self.branch, self.repo,
                        self.script, self.cwd, self.packages,
                        self.requirements_file, self.base_task_id
                ] + (list(self.docker.values()))):
                    return task

        # clear the script section
        task_state['script'] = {}

        if repo_info:
            task_state['script']['repository'] = repo_info.script['repository']
            task_state['script']['version_num'] = repo_info.script[
                'version_num']
            task_state['script']['branch'] = repo_info.script['branch']
            task_state['script']['diff'] = repo_info.script['diff'] or ''
            task_state['script']['working_dir'] = repo_info.script[
                'working_dir']
            task_state['script']['entry_point'] = repo_info.script[
                'entry_point']
            task_state['script']['binary'] = repo_info.script['binary']
            task_state['script']['requirements'] = repo_info.script.get(
                'requirements') or {}
            if self.cwd:
                self.cwd = self.cwd
                cwd = self.cwd if Path(self.cwd).is_dir() else (
                    Path(repo_info.script['repo_root']) / self.cwd).as_posix()
                if not Path(cwd).is_dir():
                    raise ValueError(
                        "Working directory \'{}\' could not be found".format(
                            cwd))
                cwd = Path(cwd).relative_to(
                    repo_info.script['repo_root']).as_posix()
                entry_point = \
                    Path(repo_info.script['repo_root']) / repo_info.script['working_dir'] / repo_info.script[
                        'entry_point']
                entry_point = entry_point.relative_to(cwd).as_posix()
                task_state['script']['entry_point'] = entry_point or ""
                task_state['script']['working_dir'] = cwd or "."
        elif self.repo:
            # normalize backslashes and remove first one
            entry_point = '/'.join(
                [p for p in self.script.split('/') if p and p != '.'])
            cwd = '/'.join(
                [p for p in (self.cwd or '.').split('/') if p and p != '.'])
            if cwd and entry_point.startswith(cwd + '/'):
                entry_point = entry_point[len(cwd) + 1:]
            task_state['script']['repository'] = self.repo
            task_state['script']['version_num'] = self.commit or None
            task_state['script']['branch'] = self.branch or None
            task_state['script']['diff'] = ''
            task_state['script']['working_dir'] = cwd or '.'
            task_state['script']['entry_point'] = entry_point or ""
        else:
            # standalone task
            task_state['script']['entry_point'] = self.script or ""
            task_state['script']['working_dir'] = '.'

        # update requirements
        reqs = []
        if self.requirements_file:
            with open(self.requirements_file.as_posix(), 'rt') as f:
                reqs = [line.strip() for line in f.readlines()]
        if self.packages and self.packages is not True:
            reqs += self.packages
        if reqs:
            # make sure we have clearml.
            clearml_found = False
            for line in reqs:
                if line.strip().startswith('#'):
                    continue
                package = reduce(lambda a, b: a.split(b)[0], "#;@=~<>",
                                 line).strip()
                if package == 'clearml':
                    clearml_found = True
                    break
            if not clearml_found:
                reqs.append('clearml')
            task_state['script']['requirements'] = {'pip': '\n'.join(reqs)}
        elif not self.repo and repo_info and not repo_info.script.get(
                'requirements'):
            # we are in local mode; make sure a requirements.txt (or pyproject.toml) exists - it is required
            reqs_txt_file = Path(
                repo_info.script['repo_root']) / "requirements.txt"
            poetry_toml_file = Path(
                repo_info.script['repo_root']) / "pyproject.toml"
            if self.raise_on_missing_entries and not reqs_txt_file.is_file(
            ) and not poetry_toml_file.is_file():
                raise ValueError("requirements.txt not found [{}] "
                                 "Use --requirements or --packages".format(
                                     reqs_txt_file.as_posix()))

        if self.add_task_init_call:
            script_entry = ('/' +
                            task_state['script'].get('working_dir', '.') +
                            '/' + task_state['script']['entry_point'])
            if platform == "win32":
                script_entry = os.path.normpath(script_entry).replace(
                    '\\', '/')
            else:
                script_entry = os.path.abspath(script_entry)
            idx_a = 0
            lines = None
            # find the right insertion point for the patch if we have a local file (basically right after the __future__ imports)
            if local_entry_file:
                with open(local_entry_file, 'rt') as f:
                    lines = f.readlines()
                future_found = self._locate_future_import(lines)
                if future_found >= 0:
                    idx_a = future_found + 1

            task_init_patch = ''
            if self.repo or task_state.get('script', {}).get('repository'):
                # if we do not have requirements, add clearml to the requirements.txt
                if not reqs:
                    task_init_patch += \
                        "diff --git a/requirements.txt b/requirements.txt\n" \
                        "--- a/requirements.txt\n" \
                        "+++ b/requirements.txt\n" \
                        "@@ -0,0 +1,1 @@\n" \
                        "+clearml\n"

                # Add Task.init call
                task_init_patch += \
                    "diff --git a{script_entry} b{script_entry}\n" \
                    "--- a{script_entry}\n" \
                    "+++ b{script_entry}\n" \
                    "@@ -{idx_a},0 +{idx_b},3 @@\n" \
                    "+from clearml import Task\n" \
                    "+Task.init()\n" \
                    "+\n".format(
                        script_entry=script_entry, idx_a=idx_a, idx_b=idx_a + 1)
            elif local_entry_file and lines:
                # if we are here it means we do not have a git diff, but a single script file
                init_lines = ["from clearml import Task\n", "Task.init()\n\n"]
                task_state['script']['diff'] = ''.join(lines[:idx_a] +
                                                       init_lines +
                                                       lines[idx_a:])
                # no need to add anything, we patched it.
                task_init_patch = ""
            else:
                # Add Task.init call
                task_init_patch += \
                    "from clearml import Task\n" \
                    "Task.init()\n\n"

            # make sure we add the diff at the end of the current diff
            task_state['script']['diff'] = task_state['script'].get('diff', '')
            if task_state['script']['diff'] and not task_state['script'][
                    'diff'].endswith('\n'):
                task_state['script']['diff'] += '\n'
            task_state['script']['diff'] += task_init_patch

        # set base docker image if provided
        if self.docker:
            if dry_run:
                task_state['container'] = dict(
                    image=self.docker.get('image') or '',
                    arguments=self.docker.get('args') or '',
                    setup_shell_script=self.docker.get('bash_script') or '',
                )
            else:
                task.set_base_docker(
                    docker_image=self.docker.get('image'),
                    docker_arguments=self.docker.get('args'),
                    docker_setup_bash_script=self.docker.get('bash_script'),
                )

        if self.verbose:
            if task_state['script']['repository']:
                repo_details = {
                    k: v
                    for k, v in task_state['script'].items()
                    if v and k not in ('diff', 'requirements', 'binary')
                }
                print('Repository Detected\n{}'.format(
                    json.dumps(repo_details, indent=2)))
            else:
                print('Standalone script detected\n  Script: {}'.format(
                    self.script))

            if task_state['script'].get('requirements') and \
                    task_state['script']['requirements'].get('pip'):
                print('Requirements:{}{}'.format(
                    '\n  Using requirements.txt: {}'.format(
                        self.requirements_file.as_posix())
                    if self.requirements_file else '',
                    '\n  {}Packages: {}'.format(
                        'Additional ' if self.requirements_file else '',
                        self.packages) if self.packages else ''))
            if self.docker:
                print('Base docker image: {}'.format(self.docker))

        if dry_run:
            return task_state

        # update the Task
        task.update_task(task_state)
        self.task = task
        return task

    def _set_output_uri(self, task):
        if self.output_uri:
            try:
                task.output_uri = self.output_uri
            except ValueError:
                getLogger().warning(
                    'Could not verify permission for output_uri: "{}"'.format(
                        self.output_uri))
                # do not verify the output uri (it might not be valid when we are creating the Task)
                task.storage_uri = self.output_uri

    def update_task_args(self, args=None):
        # type: (Optional[Union[Sequence[str], Sequence[Tuple[str, str]]]]) -> ()
        """
        Update the newly created Task argparse Arguments
        If called before Task created, used for argument verification

        :param args: Arguments to pass to the remote execution, list of string pairs (argument, value) or
            list of strings '<argument>=<value>'. Example: ['lr=0.003', ('batch_size', 64)]
        """
        if not args:
            return

        # check args are in format <key>=<value>
        args_list = []
        for a in args:
            if isinstance(a, (list, tuple)):
                assert len(a) == 2
                args_list.append(a)
                continue
            try:
                parts = a.split('=', 1)
                assert len(parts) == 2
                args_list.append(parts)
            except Exception:
                raise ValueError(
                    "Failed parsing argument '{}', arguments must be in "
                    "'<key>=<value>' format".format(a))

        if not self.task:
            return

        task_params = self.task.get_parameters()
        args_list = {'Args/{}'.format(k): v for k, v in args_list}
        task_params.update(args_list)
        self.task.set_parameters(task_params)

    def get_id(self):
        # type: () -> Optional[str]
        """
        :return: Return the created Task id (str)
        """
        return self.task.id if self.task else None

    @staticmethod
    def _locate_future_import(lines):
        # type: (List[str]) -> int
        """
        :param lines: string lines of a python file
        :return: line index of the last __future_ import. return -1 if no __future__ was found
        """
        # skip over the first two lines, they are ours
        # then skip over empty or comment lines
        lines = [(i, line.split('#', 1)[0].rstrip())
                 for i, line in enumerate(lines)
                 if line.strip('\r\n\t ') and not line.strip().startswith('#')]

        # remove triple quotes ' """ '
        nested_c = -1
        skip_lines = []
        for i, line_pair in enumerate(lines):
            for _ in line_pair[1].split('"""')[1:]:
                if nested_c >= 0:
                    skip_lines.extend(list(range(nested_c, i + 1)))
                    nested_c = -1
                else:
                    nested_c = i
        # now keep only the lines that are not inside triple-quoted strings
        lines = [pair for i, pair in enumerate(lines) if i not in skip_lines]

        from_future = re.compile(r"^from[\s]*__future__[\s]*")
        import_future = re.compile(r"^import[\s]*__future__[\s]*")
        # test if we have __future__ import
        found_index = -1
        for a_i, (_, a_line) in enumerate(lines):
            if found_index >= a_i:
                continue
            if from_future.match(a_line) or import_future.match(a_line):
                found_index = a_i
                # check the last import block
                i, line = lines[found_index]
                # whether we have a \\ character at the end of the line or the import continues inside parentheses
                parenthesized_lines = '(' in line and ')' not in line
                while line.endswith('\\') or parenthesized_lines:
                    found_index += 1
                    i, line = lines[found_index]
                    if ')' in line:
                        break

            else:
                break

        return found_index if found_index < 0 else lines[found_index][0]
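
A minimal sketch of the patching idea implemented by the add_task_init_call branch above: inject a Task.init() call right after the last __future__ import of the entry script. The helper below is hypothetical (not part of the class) and ignores the triple-quoted strings and multi-line imports that _locate_future_import handles.

def patch_entry_script(source_lines):
    # hypothetical helper: return the script lines with Task.init() injected
    # right after the last __future__ import (or at the top if there is none)
    idx = 0
    for i, line in enumerate(source_lines):
        stripped = line.split('#', 1)[0].strip()
        if stripped.startswith('from __future__') or stripped.startswith('import __future__'):
            idx = i + 1
    init_lines = ["from clearml import Task\n", "Task.init()\n", "\n"]
    return source_lines[:idx] + init_lines + source_lines[idx:]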
Ejemplo n.º 44
0
    def __init__(
            self,
            project_name=None,  # type: Optional[str]
            task_name=None,  # type: Optional[str]
            task_type=None,  # type: Optional[str]
            repo=None,  # type: Optional[str]
            branch=None,  # type: Optional[str]
            commit=None,  # type: Optional[str]
            script=None,  # type: Optional[str]
            working_directory=None,  # type: Optional[str]
            packages=None,  # type: Optional[Union[bool, Sequence[str]]]
            requirements_file=None,  # type: Optional[Union[str, Path]]
            docker=None,  # type: Optional[str]
            docker_args=None,  # type: Optional[str]
            docker_bash_setup_script=None,  # type: Optional[str]
            output_uri=None,  # type: Optional[str]
            base_task_id=None,  # type: Optional[str]
            add_task_init_call=True,  # type: bool
            raise_on_missing_entries=False,  # type: bool
            verbose=False,  # type: bool
    ):
        # type: (...) -> None
        """
        Create a new Task from an existing code base.
        If the code does not already contain a call to Task.init, pass add_task_init_call=True,
        and the code will be patched in remote execution (i.e. when executed by `clearml-agent`).

        :param project_name: Set the project name for the task. Required if base_task_id is None.
        :param task_name: Set the name of the remote task. Required if base_task_id is None.
        :param task_type: Optional, The task type to be created. Supported values: 'training', 'testing', 'inference',
            'data_processing', 'application', 'monitor', 'controller', 'optimizer', 'service', 'qc', 'custom'
        :param repo: Remote URL for the repository to use, OR path to local copy of the git repository
            Example: 'https://github.com/allegroai/clearml.git' or '~/project/repo'
        :param branch: Select specific repository branch/tag (implies the latest commit from the branch)
        :param commit: Select specific commit id to use (default: latest commit,
            or when used with local repository matching the local commit id)
        :param script: Specify the entry point script for the remote execution. When used in tandem with
            remote git repository the script should be a relative path inside the repository,
            for example: './source/train.py' . When used with local repository path it supports a
            direct path to a file inside the local repository itself, for example: '~/project/source/train.py'
        :param working_directory: Working directory to launch the script from. Default: repository root folder.
            Relative to repo root or local folder.
        :param packages: Manually specify a list of required packages. Example: ["tqdm>=2.1", "scikit-learn"]
            or `True` to automatically create requirements
            based on locally installed packages (repository must be local).
        :param requirements_file: Specify requirements.txt file to install when setting the session.
            If not provided, the requirements.txt from the repository will be used.
        :param docker: Select the docker image to be executed in by the remote session
        :param docker_args: Add docker arguments, pass a single string
        :param docker_bash_setup_script: Add bash script to be executed
            inside the docker before setting up the Task's environment
        :param output_uri: Optional, set the Tasks's output_uri (Storage destination).
            examples: 's3://bucket/folder', 'https://server/' , 'gs://bucket/folder', 'azure://bucket', '/folder/'
        :param base_task_id: Use a pre-existing task in the system, instead of a local repo/script.
            Essentially clones an existing task and overrides arguments/requirements.
        :param add_task_init_call: If True, a 'Task.init()' call is added to the script entry point in remote execution.
        :param raise_on_missing_entries: If True raise ValueError on missing entries when populating
        :param verbose: If True print verbose logging
        """
        if repo and len(urlparse(repo).scheme) <= 1 and not re.compile(
                self._VCS_SSH_REGEX).match(repo):
            folder = repo
            repo = None
        else:
            folder = None

        if raise_on_missing_entries and not base_task_id:
            if not script:
                raise ValueError("Entry point script not provided")
            if not repo and not folder and not Path(script).is_file():
                raise ValueError(
                    "Script file \'{}\' could not be found".format(script))
        if raise_on_missing_entries and commit and branch:
            raise ValueError(
                "Specify either a branch/tag or specific commit id, not both (either --commit or --branch)"
            )
        if raise_on_missing_entries and not folder and working_directory and working_directory.startswith(
                '/'):
            raise ValueError(
                "Working directory '{}' must be relative to the repository "
                "root".format(working_directory))

        if requirements_file and not Path(requirements_file).is_file():
            raise ValueError("requirements file could not be found \'{}\'")

        self.folder = folder
        self.commit = commit
        self.branch = branch
        self.repo = repo
        self.script = script
        self.cwd = working_directory
        assert not packages or isinstance(packages, (tuple, list, bool))
        self.packages = list(packages) if packages is not None and not isinstance(packages, bool) \
            else (packages or None)
        self.requirements_file = Path(
            requirements_file) if requirements_file else None
        self.base_task_id = base_task_id
        self.docker = dict(image=docker,
                           args=docker_args,
                           bash_script=docker_bash_setup_script)
        self.add_task_init_call = add_task_init_call
        self.project_name = project_name
        self.task_name = task_name
        self.task_type = task_type
        self.output_uri = output_uri
        self.task = None
        self.raise_on_missing_entries = raise_on_missing_entries
        self.verbose = verbose
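
The constructor documented above is typically paired with the create_task(), update_task_args() and get_id() methods shown earlier. A hedged usage sketch; the project, repository URL, script path, packages and argument values are illustrative placeholders:

# Illustrative values only
populate = CreateAndPopulate(
    project_name='examples',
    task_name='remote training',
    repo='https://github.com/allegroai/clearml.git',
    branch='master',
    script='examples/reporting/scalar_reporting.py',
    packages=['tqdm>=2.1', 'scikit-learn'],
    add_task_init_call=True,
    verbose=True,
)
task = populate.create_task()
populate.update_task_args(['lr=0.003', ('batch_size', 64)])
print('Created task id:', populate.get_id())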
Ejemplo n.º 45
0
 def _validate_input_file(self, input_path):
     self.logger.info(f'Validate Input File {input_path}.')
     input_narrative_file = Path(input_path)
     if not input_narrative_file.exists():
          self.logger.error(f'Could not find the txt file: {input_path}.')
         exit(1)
Ejemplo n.º 46
0
def snmpsim(site, request, tmp_path_factory):
    tmp_path = tmp_path_factory.getbasetemp()
    source_data_dir = Path(request.fspath.dirname) / "snmp_data"

    log.logger.setLevel(logging.DEBUG)
    debug.enable()
    cmd = [
        "snmpsimd.py",
        #"--log-level=error",
        "--cache-dir",
        str(tmp_path / "snmpsim"),
        "--data-dir",
        str(source_data_dir),
        # TODO: Fix port allocation to prevent problems with parallel tests
        #"--agent-unix-endpoint="
        "--agent-udpv4-endpoint=127.0.0.1:1337",
        "--agent-udpv6-endpoint=[::1]:1337",
        "--v3-user=authOnlyUser",
        "--v3-auth-key=authOnlyUser",
        "--v3-auth-proto=MD5",
    ]

    p = subprocess.Popen(
        cmd,
        close_fds=True,
        # Silence the very noisy output. May be useful to enable this for debugging tests
        #stdout=open(os.devnull, "w"),
        #stderr=subprocess.STDOUT,
    )

    # Ensure that snmpsim is ready for clients before starting with the tests
    def is_listening():
        if p.poll() is not None:
            raise Exception("snmpsimd died. Exit code: %d" % p.poll())

        num_sockets = 0
        try:
            for e in os.listdir("/proc/%d/fd" % p.pid):
                try:
                    if os.readlink("/proc/%d/fd/%s" %
                                   (p.pid, e)).startswith("socket:"):
                        num_sockets += 1
                except OSError:
                    pass
        except OSError:
            if p.poll() is None:
                raise
            raise Exception("snmpsimd died. Exit code: %d" % p.poll())

        if num_sockets < 2:
            return False

        # Correct module is only available in the site
        import netsnmp  # type: ignore[import] # pylint: disable=import-error
        var = netsnmp.Varbind("sysDescr.0")
        result = netsnmp.snmpget(var,
                                 Version=2,
                                 DestHost="127.0.0.1:1337",
                                 Community="public")
        if result is None or result[0] is None:
            return False
        return True

    wait_until(is_listening, timeout=20)

    yield

    log.logger.setLevel(logging.INFO)
    debug.disable()

    logger.debug("Stopping snmpsimd...")
    p.terminate()
    p.wait()
    logger.debug("Stopped snmpsimd.")
Ejemplo n.º 47
0
 def _set_input_file(self, input_path):
     self.logger.info(f'Get Input File.')
     self._validate_input_file(input_path)
     return Path(input_path)
Ejemplo n.º 48
0
class TestNormalizer(TestCase):
    """References TestNormalizer

    Test suite for the Normalizer class.

    `Dat` and `Csv` may be used in unit tests because they do not
    contain any logic.
    """
    def setUp(self) -> None:
        """Initializing the object to test
        """
        self.normalizer = Normalizer(to_normalize_ext=Dat.ext,
                                     separator=Dat.separator)
        self.dummy_csv = Path(FileUtils.Csv.CSV_NAME)
        self.dummy_csv.touch()

        self.dummy_dat = Path(FileUtils.Csv.DAT_NAME)
        self.dummy_dat.touch()

    def tearDown(self) -> None:
        """Reinitialize state after unit tests execution
        """
        self.dummy_csv.unlink()
        self.dummy_dat.unlink()

    def test_invalid_is_valid_csv_field_number(self):
        """A bad formatted number should be invalid
        """
        # trailing quotes
        self.assertFalse(Normalizer.is_valid_csv_field('1337"'))
        # beginning quotes
        self.assertFalse(Normalizer.is_valid_csv_field('"1337'))
        # no quotes
        self.assertFalse(Normalizer.is_valid_csv_field('1337'))

    def test_valid_is_valid_csv_field_number(self):
        """A well formatted number should be valid
        """
        # int
        self.assertTrue(Normalizer.is_valid_csv_field('"42"'))
        # float
        self.assertTrue(Normalizer.is_valid_csv_field('"13.37"'))
        # negative
        self.assertTrue(Normalizer.is_valid_csv_field('"-3.14"'))

    def test_valid_is_valid_csv_field_string(self):
        """A well formatted string should be valid
        """
        # single string
        self.assertTrue(Normalizer.is_valid_csv_field('"field"'))
        # with spaces
        self.assertTrue(Normalizer.is_valid_csv_field('"some field"'))

    def test_invalid_convert_to_csv_no_file(self):
        """A non-existing file should throw an exception
        """
        # with an incorrect extension too
        with self.assertRaises(FileNotFoundError):
            self.normalizer.convert_to_csv(
                dat_path=FileUtils.Csv.NON_EXISTING_NAME)

        # with the appropriate extension
        with self.assertRaises(FileNotFoundError):
            self.normalizer.convert_to_csv(
                dat_path=FileUtils.Csv.NON_EXISTING_NAME + Dat.ext)

    def test_invalid_convert_to_csv_bad_file_dat_ext(self):
        """A bad DAT file extension should throw an exception
        """
        with self.assertRaises(BadFileFormatException):
            self.normalizer.convert_to_csv(dat_path=str(self.dummy_csv))

    def test_invalid_convert_to_csv_bad_file_dat_csv(self):
        """A bad CSV file extension should throw an exception
        """
        with self.assertRaises(BadFileFormatException):
            self.normalizer.convert_to_csv(dat_path=str(self.dummy_dat),
                                           csv_path=str(self.dummy_dat))

    def test_invalid_convert_to_csv_from_folder_non_existing_folder(self):
        """A non-existing folder should throw an exception
        """
        with self.assertRaises(BadFileFormatException):
            self.normalizer.convert_to_csv_from_folder(
                dat_folder=FileUtils.Csv.NON_EXISTING_NAME)

    def test_invalid_convert_to_csv_from_folder_not_folder(self):
        """A non-existing folder should throw an exception
        """
        with self.assertRaises(BadFileFormatException):
            self.normalizer.convert_to_csv_from_folder(
                dat_folder=self.dummy_dat)
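
The test cases above pin down the expected behaviour of Normalizer.is_valid_csv_field: a field is valid only when it is entirely wrapped in double quotes. A regex-based sketch that satisfies these tests (an illustration, not the project's actual implementation):

import re

_QUOTED_FIELD = re.compile(r'^"[^"]*"$')


def is_valid_csv_field(field):
    # valid only when the whole field is wrapped in double quotes
    return bool(_QUOTED_FIELD.match(field))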
Ejemplo n.º 49
0
    def set_syslog_ng_paths(self, instance_name):
        if self.__instance_name is not None:
            raise Exception("Instance already configured")
        self.__instance_name = instance_name
        working_dir = tc_parameters.WORKING_DIR
        relative_working_dir = self.__testcase_parameters.get_relative_working_dir(
        )
        install_dir = self.__testcase_parameters.get_install_dir()
        if not install_dir:
            raise ValueError("Missing --installdir start parameter")

        self.__syslog_ng_paths = {
            "dirs": {
                "install_dir": Path(install_dir)
            },
            "file_paths": {
                "config_path":
                Path(working_dir, "syslog_ng_{}.conf".format(instance_name)),
                "persist_path":
                Path(working_dir,
                     "syslog_ng_{}.persist".format(instance_name)),
                "pid_path":
                Path(working_dir, "syslog_ng_{}.pid".format(instance_name)),
                "control_socket_path":
                Path(relative_working_dir,
                     "syslog_ng_{}.ctl".format(instance_name)),
                "stderr":
                Path(working_dir, "syslog_ng_{}_stderr".format(instance_name)),
                "stdout":
                Path(working_dir, "syslog_ng_{}_stdout".format(instance_name)),
            },
            "binary_file_paths": {
                "slogkey": Path(install_dir, "bin", "slogkey"),
                "slogverify": Path(install_dir, "bin", "slogverify"),
                "syslog_ng_binary": Path(install_dir, "sbin", "syslog-ng"),
                "syslog_ng_ctl": Path(install_dir, "sbin", "syslog-ng-ctl"),
                "loggen": Path(install_dir, "bin", "loggen"),
            },
        }
        return self
Ejemplo n.º 50
0
def pca(path_to_data,
        dtype,
        n_channels,
        data_order,
        recordings,
        spike_index,
        spike_size,
        temporal_features,
        neighbors_matrix,
        channel_index,
        max_memory,
        gmm_params,
        output_path=None,
        scores_filename='scores.npy',
        rotation_matrix_filename='rotation.npy',
        spike_index_clear_filename='spike_index_clear_pca.npy',
        if_file_exists='skip'):
    """Apply PCA in batches

    Parameters
    ----------
    path_to_data: str
        Path to recordings in binary format

    dtype: str
        Recordings dtype

    n_channels: int
        Number of channels in the recordings

    data_order: str
        Recordings order, one of ('channels', 'samples'). In a dataset with k
        observations per channel and j channels: 'channels' means first k
        contiguous observations come from channel 0, then channel 1, and so
        on. 'sample' means first j contiguous data are the first observations
        from all channels, then the second observations from all channels and
        so on

    recordings: np.ndarray (n_observations, n_channels)
        Multi-channel recordings

    spike_index: numpy.ndarray
        A 2D numpy array, first column is spike time, second column is main
        channel (the channel where spike has the biggest amplitude)

    spike_size: int
        Spike size

    temporal_features: numpy.ndarray
        Number of output features

    neighbors_matrix: numpy.ndarray (n_channels, n_channels)
        Boolean numpy 2-D array where the (i, j) entry is True if channel i is
        considered a neighbor of channel j

    channel_index: np.array (n_channels, n_neigh)
        Each row indexes its neighboring channels.
        For example, channel_index[c] is the index of
        neighboring channels (including itself)
        If any value is equal to n_channels, it is merely a placeholder used
        when a channel has fewer than n_neigh neighboring channels


    max_memory:
        Max memory to use in each batch (e.g. 100MB, 1GB)

    gmm_params:
        Dictionary with the parameters of the Gaussian mixture model
        
    output_path: str, optional
        Directory to store the scores and rotation matrix, if None, previous
        results on disk are ignored, operations are computed and results
        aren't saved to disk

    scores_filename: str, optional
        File name for the scores; if False, the scores are not saved

    rotation_matrix_filename: str, optional
        File name for the rotation matrix; if False, the rotation matrix is
        not saved

    spike_index_clear_filename: str, optional
        File name for spike index clear

    if_file_exists:
        What to do if there is already a file in the rotation matrix and/or
        scores location. One of 'overwrite', 'abort', 'skip'. 'overwrite'
        replaces the existing file, 'abort' raises a ValueError if the file
        exists, and 'skip' skips the operation if the file exists

    Returns
    -------
    scores: numpy.ndarray
        Numpy 3D array of size (n_waveforms, n_reduced_features,
        n_neighboring_channels). Scores for every waveform; the second
        dimension is reduced from n_temporal_features to n_reduced_features,
        and the third dimension depends on the number of neighboring channels

    rotation_matrix: numpy.ndarray
        3D array (window_size, n_features, n_channels)
    """

    ###########################
    # compute rotation matrix #
    ###########################

    bp = BatchProcessor(path_to_data,
                        dtype,
                        n_channels,
                        data_order,
                        max_memory,
                        buffer_size=spike_size)

    # compute WPCA
    WAVE, FEATURE, CH = 0, 1, 2

    logger.info('Performing WPCA')

    logger.info('Computing Wavelets ...')
    feature = bp.multi_channel_apply(wavedec,
                                     mode='memory',
                                     pass_batch_info=True,
                                     spike_index=spike_index,
                                     spike_size=spike_size,
                                     wvtype='haar')

    features = reduce(lambda x, y: np.concatenate((x, y)),
                      [f for f in feature])

    logger.info('Computing weights..')

    # Weighting the features using metric defined in gmtype
    weights = gmm_weight(features, gmm_params, spike_index)
    wfeatures = features * weights

    n_features = wfeatures.shape[FEATURE]
    wfeatures_lin = np.reshape(
        wfeatures, (wfeatures.shape[WAVE] * n_features, wfeatures.shape[CH]))
    feature_index = np.arange(0, wfeatures.shape[WAVE] * n_features,
                              n_features)

    TMP_FOLDER, _ = os.path.split(path_to_data)
    feature_path = os.path.join(TMP_FOLDER, 'features.bin')
    feature_params = writefile(wfeatures_lin, feature_path)

    bp_feat = BatchProcessor(feature_path,
                             feature_params['dtype'],
                             feature_params['n_channels'],
                             feature_params['data_order'],
                             max_memory,
                             buffer_size=n_features)

    # compute PCA sufficient statistics from extracted features

    logger.info('Computing PCA sufficient statistics...')
    stats = bp_feat.multi_channel_apply(suff_stat_features,
                                        mode='memory',
                                        pass_batch_info=True,
                                        spike_index=spike_index,
                                        spike_size=spike_size,
                                        feature_index=feature_index,
                                        feature_size=n_features)

    suff_stats = reduce(lambda x, y: np.add(x, y), [e[0] for e in stats])
    spikes_per_channel = reduce(lambda x, y: np.add(x, y),
                                [e[1] for e in stats])

    # compute PCA projection matrix
    logger.info('Computing PCA projection matrix...')
    rotation = project(suff_stats, spikes_per_channel, temporal_features,
                       neighbors_matrix)

    #####################################
    # waveform dimensionality reduction #
    #####################################

    logger.info('Reducing spikes dimensionality with PCA matrix...')

    # using a new Batch to read feature file
    res = bp_feat.multi_channel_apply(score_features,
                                      mode='memory',
                                      pass_batch_info=True,
                                      rot=rotation,
                                      channel_index=channel_index,
                                      spike_index=spike_index,
                                      feature_index=feature_index)

    scores = np.concatenate([element[0] for element in res], axis=0)
    spike_index = np.concatenate([element[1] for element in res], axis=0)
    feature_index = np.concatenate([element[2] for element in res], axis=0)

    # renormalizing PC projections to similar unitary variance
    scores = st.zscore(scores, axis=0)

    # save scores
    if output_path and scores_filename:
        path_to_score = Path(output_path) / scores_filename
        save_numpy_object(scores,
                          path_to_score,
                          if_file_exists=if_file_exists,
                          name='scores')

    if output_path and spike_index_clear_filename:
        path_to_spike_index = Path(output_path) / spike_index_clear_filename
        save_numpy_object(spike_index,
                          path_to_spike_index,
                          if_file_exists=if_file_exists,
                          name='Spike index PCA')

    if output_path and rotation_matrix_filename:
        path_to_rotation = Path(output_path) / rotation_matrix_filename
        save_numpy_object(rotation,
                          path_to_rotation,
                          if_file_exists=if_file_exists,
                          name='rotation matrix')

    return scores, spike_index, rotation
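
A call to pca wires together the recording metadata and the detected spike index described in the docstring. The sketch below is illustrative only: the file name, geometry and parameter values are placeholders, and recordings, spike_index, neighbors_matrix, channel_index and gmm_params are assumed to come from earlier pipeline steps.

scores, clear_spike_index, rotation = pca(
    path_to_data='tmp/standardized.bin',
    dtype='float32',
    n_channels=49,
    data_order='samples',
    recordings=recordings,
    spike_index=spike_index,
    spike_size=40,
    temporal_features=3,
    neighbors_matrix=neighbors_matrix,
    channel_index=channel_index,
    max_memory='1GB',
    gmm_params=gmm_params,
    output_path='tmp/',
    if_file_exists='overwrite')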
Ejemplo n.º 51
0
import json

# Attach S3 bucket
fs = s3fs.S3FileSystem(key="", secret="")
path = 'agzr-capstone/Data/'

dataset = 'Common_Voice'

# Filter librosa import warnings
warnings.simplefilter('ignore', category=UserWarning)

# Get list of files
files = pd.read_csv('common_voice_files.csv')
# Subset files
language = 'Catalan'  # Change this to the language of choice
files = [i for i in files['filename'] if Path(i).parts[1] == language]

# Get speaker csvs
csv_path = Path().joinpath(dataset).joinpath(language).joinpath(
    'validated.tsv')
speaker_df = pd.read_csv(csv_path, sep='\t')


def split_waveform(wav, durs):
    '''Splits a waveform evenly into chunks whose length (in samples) is
    chosen randomly from durs.

    Returns a list of lists of equal-length waveforms.'''
    # Get durations shorter than waveform
    num_samples = len(wav)
    wav_durs = [i for i in durs if num_samples // i > 0]
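
The snippet is cut off after the duration filtering step. A self-contained sketch of the splitting described in the docstring (pick a fitting duration at random and cut the waveform into equal-length, non-overlapping chunks) could look like the following; it is an illustration, not the original continuation:

import random


def split_waveform_sketch(wav, durs):
    # keep only durations that fit at least once into the waveform
    wav_durs = [d for d in durs if len(wav) // d > 0]
    if not wav_durs:
        return []
    dur = random.choice(wav_durs)
    n_chunks = len(wav) // dur
    # equal-length, non-overlapping chunks
    return [wav[i * dur:(i + 1) * dur] for i in range(n_chunks)]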
Ejemplo n.º 52
0
    def test_with_log(self, tmpdir):
        """Tests call with log file."""

        log_path = Path(native_str(tmpdir)) / 'test.log'
        util.run_command(['echo', 'test'], log_path=log_path)
Ejemplo n.º 53
0
def udb_corr(filelist,
             outpath='./',
             calibrate=False,
             new=True,
             gctime=None,
             attncal=True,
             desat=False):
    ''' Complete routine to read in an existing idb or udb file and output
        a new file of the same name in the local directory, with all corrections
        applied.
        
        Inputs:
          filelist  List of files to read.  The output of all files in the list
                        are concatenated into a single output file based on the first
                        file in the list.  Use an external loop over files if this is
                        not wanted.
          calibrate A boolean flag to indicate whether calibration factors should be
                        applied to the data.  The default is False, in anticipation that
                        such calibration will be applied as a CASA bandpass table, but
                        if set to True, the SOLPNTCAL analysis is used.
          new       If True (default), the "new" scheme of attenuation corrections
                        based on GAINCALTEST results is applied.  Otherwise, only the
                        nominal attenuation corrections are applied.
          gctime    A Time() object whose date is used to find the GAINCALTEST data.
                        If None (default), then the date of the data is used.  Note that
                        gctime is only used if parameter new is True.
          attncal   If False, the attenuation correction is skipped - expected to be
                        applied manually in post-processing (e.g. 2017-09-10 X8 flare)
          desat     If True, apply the correlator saturation correction when
                        reading IDB files (ignored for UDB files)
          outpath   Directory in which the output file is written (default './')
    '''
    import sys
    import os
    import udb_util as uu
    import time
    from pathlib2 import Path
    from copy import deepcopy
    if type(filelist) is str or type(filelist) is np.string_:
        # Convert input filename to list if not already a list
        filelist = [filelist]

    for idx, file in enumerate(filelist):
        if file[-1] == '/':
            filelist[idx] = file[:-1]

    filecount = 0
    for filename in filelist:
        t1 = time.time()
        if desat and filename.find('UDB') != -1:
            print('File', filename,
                  'appears to be a UDB file, so desat=True will be ignored.')
            out = uu.readXdata(filename)
        else:
            if desat:
                print('Correlator saturation correction will be applied.')
            out = uu.readXdata(filename, desat=desat)
        print('Reading file took', time.time() - t1, 's')
        sys.stdout.flush()
        # Mask any cross-correlated data that have zero U coordinate (which indicates a stateframe error)
        ubad, = np.where(out['uvw'][0, 0] == 0)
        out['x'][:, :, :, ubad] = np.ma.masked

        trange = Time(out['time'][[0, -1]], format='jd')
        t1 = time.time()
        azeldict = get_sql_info(trange)
        print('Reading SQL info took', time.time() - t1, 's')
        sys.stdout.flush()
        ## Correct data for attenuation changes
        if attncal:
            from calibration import skycal_anal
            t1 = time.time()
            if calibrate:
                # For the skycal, use the date that the total power calibration was taken
                if trange[0].datetime.hour < 7:
                    # Data time is earlier than 7 UT (i.e. on previous local day) so
                    # use previous date at 20 UT.
                    mjd = int(trange[0].mjd) - 1 + 20. / 24
                else:
                    # Use current date at 20 UT
                    mjd = int(trange[0].mjd) + 20. / 24
                calfac = get_calfac(Time(mjd, format='mjd'))
                caltime = Time(calfac['timestamp'], format='lv')
                skycal = skycal_anal(t=caltime, do_plot=False)
                if np.abs(caltime - trange[0]) > 0.5:
                    print('Note, SKYCAL is being read from', caltime.iso[:10],
                          'to match TP calibration date.')
            else:
                skycal = skycal_anal(t=trange[0], do_plot=False)
            if new:
                # Subtract receiver noise, then correct for front end attenuation
                cout = apply_fem_level(out, gctime, skycal=skycal)
            else:
                cout = apply_attn_corr(out)
            print('Applying attn correction took', time.time() - t1, 's')
            sys.stdout.flush()
            t1 = time.time()
        else:
            cout = out
        # Correct data for differential feed rotation
        coutu = unrot(cout, azeldict)
        print('Applying feed rotation correction took', time.time() - t1, 's')
        sys.stdout.flush()
        # Optionally apply calibration to convert to solar flux units
        if calibrate:
            t1 = time.time()
            if trange[0].datetime.hour < 7:
                # Data time is earlier than 7 UT (i.e. on previous local day) so
                # use previous date at 20 UT.
                mjd = int(trange[0].mjd) - 1 + 20. / 24
            else:
                # Use current date at 20 UT
                mjd = int(trange[0].mjd) + 20. / 24
            calfac = get_calfac(Time(mjd, format='mjd'))
            if Time(calfac['sqltime'], format='lv').mjd == mjd:
                coutu = apply_calfac(coutu, calfac)
                print('Applying calibration took', time.time() - t1, 's')
            else:
                print('Error: no TP calibration for this date.  Skipping calibration.')
        sys.stdout.flush()
        filecount += 1
        if filecount == 1:
            x = coutu
        else:
            x = uu.concatXdata(x, coutu)
    ufilename = outpath + filelist[0].split('/')[-1]
    while Path(ufilename).exists():
        # Handle case of existing file, by appending _n, where n increments (up to 9)
        if ufilename[-2] == '_':
            ufilename = ufilename[:-1] + str(int(ufilename[-1]) + 1)
        else:
            ufilename += '_1'
    ufile_out = uu.udbfile_write(x, filelist[0], ufilename)
    return ufilename
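
The while loop near the end avoids clobbering an existing output file by appending an incrementing _n suffix (single digit, so up to _9). Extracted as a standalone helper, and purely illustrative, the idea is:

from pathlib2 import Path


def unique_filename(name):
    # Append _1, _2, ... until the name no longer exists on disk.
    while Path(name).exists():
        if name[-2] == '_' and name[-1].isdigit():
            name = name[:-1] + str(int(name[-1]) + 1)
        else:
            name += '_1'
    return name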
Ejemplo n.º 54
0
from pathlib2 import Path
import re
from collections import OrderedDict
import json


class dotdict(dict):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.__dict__ = self


file = Path('./10.1063_1.1774263_jats.txt')
with file.open(encoding='utf-8') as f:
    doc = f.read()

splitLineSpace = re.finditer(r'(?P<TX>[^\n]+)(?P<LF>\n)', doc)
textList = [chunk.groupdict() for chunk in splitLineSpace]
# print(textList)

for i, dicText in enumerate(textList):
    splitspaces = re.finditer(r'(?P<TK>[^\s]+?)(?P<SP>\s)', dicText['TX'])
    token_list = [ss.groupdict() for ss in splitspaces]
    dicText['TX'] = token_list
print(textList)

#
# for token_ in token_list:
#     print(token_)
#     puncts = re.match(r'(.+?)([\.\,\;\:]$)', token_[0])
#     if puncts:
Ejemplo n.º 55
0
from __future__ import print_function, unicode_literals

import datetime
import platform
import sys
from subprocess import check_call
from time import time

import babis
from apscheduler.schedulers.blocking import BlockingScheduler
from pathlib2 import Path


# ROOT path of the project. A pathlib.Path object.
ROOT_PATH = Path(__file__).resolve().parents[1]
ROOT = str(ROOT_PATH)

# add bedrock to path
sys.path.append(ROOT)

# must import after adding bedrock to path
from bedrock.base.config_manager import config  # noqa

# these are the defaults, but explicit is better
JOB_DEFAULTS = {
    'coalesce': True,
    'max_instances': 1,
}
schedule = BlockingScheduler(job_defaults=JOB_DEFAULTS)
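
With the BlockingScheduler configured above, jobs are registered with the scheduled_job decorator and the scheduler is started at the bottom of the module. A minimal sketch; the management command and the one-hour interval are illustrative placeholders:

@schedule.scheduled_job('interval', hours=1)
def job_refresh_data():
    # placeholder management command, run from the project root
    check_call('python manage.py refresh_data'.split(), cwd=ROOT)


if __name__ == '__main__':
    try:
        schedule.start()
    except (KeyboardInterrupt, SystemExit):
        pass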
Ejemplo n.º 56
0
    image_folder_path = image_folder_new_path


# Push your new change to github remote end
def git_ops():
    subprocess.run(["git", "add", "-A"])
    subprocess.run(["git", "commit", "-m", "update file " + args.input.stem])
    subprocess.run(["git", "push", "-u", "origin", "master"])


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        'Please input the file path you want to transfer using --input=""')

    # RGB arguments
    parser.add_argument('--compress',
                        action='store_true',
                        help='Compress the image which is too large')

    parser.add_argument('--input',
                        type=str,
                        help='Path to the file you want to transfer.')

    args = parser.parse_args()
    if args.input is None:
        raise FileNotFoundError("Please input the file's path to start!")
    else:
        args.input = Path(args.input)
        image_folder_path = args.input.parent / (args.input.stem)
        process_for_zhihu()
Ejemplo n.º 57
0
                        type=int)
    parser.add_argument('-l',
                        '--lr',
                        help='learning rate',
                        default=0.0001,
                        type=float)
    parser.add_argument('-o',
                        '--outputs',
                        help='output directory',
                        default='model')
    parser.add_argument('-f', '--dataset', help='cleaned data listing')
    args = parser.parse_args()

    info('Using TensorFlow v.{}'.format(tf.__version__))

    data_path = Path(args.base_path).joinpath(args.data).resolve(strict=False)
    target_path = Path(args.base_path).resolve(strict=False).joinpath(
        args.outputs)
    dataset = Path(args.base_path).joinpath(args.dataset)
    image_size = args.image_size

    args = {
        "data_path": str(data_path),
        "image_size": image_size,
        "epochs": args.epochs,
        "batch_size": args.batch,
        "learning_rate": args.lr,
        "output": str(target_path),
        "dataset": str(dataset)
    }
Ejemplo n.º 58
0
class TermiusApp(App):
    """Class for CLI application."""
    def __init__(self):
        """Construct new CLI application."""
        super(TermiusApp, self).__init__(
            description='Termius - crossplatform SSH and Telnet client',
            version=__version__,
            command_manager=CommandManager('termius.handlers'),
        )

        self.configure_signals()
        self.directory_path = Path(expanduser('~/.{}/'.format(self.NAME)))
        if not self.directory_path.is_dir():
            self.directory_path.mkdir(parents=True)

        self.command_manager.add_command('help', HelpCommand)

    def configure_logging(self):
        """Change logging level for request package."""
        super(TermiusApp, self).configure_logging()
        logging.getLogger('requests').setLevel(logging.WARNING)
        return

    # pylint: disable=no-self-use
    def configure_signals(self):
        """Bind subscribers to signals."""
        post_create_instance.connect(store_ssh_key, sender=SshKey)
        post_update_instance.connect(store_ssh_key, sender=SshKey)
        post_delete_instance.connect(delete_ssh_key, sender=SshKey)

        post_logout.connect(clean_data)

    def prepare_to_run_command(self, cmd):
        """Collect analytics if it`s not disabled."""
        if os.getenv('NOT_COLLECT_STAT'):
            return

        self.collect_analytics(cmd)

    def collect_analytics(self, cmd):
        """Make Analytics instance and send analytics."""
        analytics = Analytics(self, getattr(cmd, 'config', None))
        analytics.send_analytics(cmd.cmd_name)

    def build_option_parser(self, description, version, argparse_kwargs=None):
        """Return an argparse option parser for this application.

        Subclasses may override this method to extend
        the parser with more global options.

        :param description: full description of the application
        :paramtype description: str
        :param version: version number for the application
        :paramtype version: str
        :param argparse_kwargs: extra keyword argument passed to the
                                ArgumentParser constructor
        :paramtype extra_kwargs: dict
        """
        argparse_kwargs = argparse_kwargs or {}
        parser = argparse.ArgumentParser(description=description,
                                         add_help=False,
                                         **argparse_kwargs)
        parser.add_argument('--version',
                            action='version',
                            version='%(prog)s {0}'.format(version),
                            help='display version information and exit')
        verbose_group = parser.add_mutually_exclusive_group()
        verbose_group.add_argument(
            '-v',
            '--verbose',
            action='count',
            dest='verbose_level',
            default=self.DEFAULT_VERBOSE_LEVEL,
            help='provide a detailed output',
        )
        verbose_group.add_argument(
            '-q',
            '--quiet',
            action='store_const',
            dest='verbose_level',
            const=0,
            help='display warnings and errors only',
        )
        parser.add_argument(
            '--log-file',
            action='store',
            default=None,
            help='record output into a designated file',
        )
        if self.deferred_help:
            parser.add_argument(
                '-h',
                '--help',
                dest='deferred_help',
                action='store_true',
                help="display help message",
            )
        else:
            parser.add_argument(
                '-h',
                '--help',
                action=HelpAction,
                nargs=0,
                default=self,  # tricky
                help="show the help message",
            )
        parser.add_argument(
            '--debug',
            default=False,
            action='store_true',
            help='enable debugging mode',
        )
        return parser
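
Since build_option_parser is the documented extension point for global options, a subclass can extend the returned parser before cliff parses the command line. A hedged sketch; the subclass name and the extra flag are purely illustrative:

class CustomTermiusApp(TermiusApp):
    def build_option_parser(self, description, version, argparse_kwargs=None):
        # extend the base parser with one illustrative global option
        parser = super(CustomTermiusApp, self).build_option_parser(
            description, version, argparse_kwargs)
        parser.add_argument(
            '--config-dir',
            default=None,
            help='override the default application directory (illustrative)',
        )
        return parser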
Ejemplo n.º 59
0
import os
import getpass
import functools
import platform
from collections import namedtuple
try:
    from pathlib2 import Path
except ImportError:  # pragma: no cover
    from pathlib import Path
import clckwrkbdgr._six

_XDGDir = namedtuple('_XDGDir', 'name path ensure')
# Basic XDG structure.
_dir_data = [
    _XDGDir('XDG_CONFIG_HOME',
            Path('~').expanduser() / '.config', True),
]
if platform.system() == 'Windows':  # pragma: no cover -- Windows only.
    _dir_data += [
        _XDGDir('XDG_DATA_HOME', Path(os.environ.get('APPDATA')), False),
        _XDGDir('XDG_CACHE_HOME',
                Path(os.environ.get('LOCALAPPDATA')) / 'Cache', True),
        _XDGDir('XDG_RUNTIME_DIR',
                Path(os.environ.get('TEMP', os.environ['USERPROFILE'])),
                False),
        _XDGDir('XDG_STATE_HOME', Path(os.environ.get('LOCALAPPDATA')), False),
    ]
    try:
        import clckwrkbdgr.winnt.shell
        default_desktop_dir = clckwrkbdgr.winnt.shell.Desktop
    except:
Ejemplo n.º 60
0
    def create_task_from_function(
            cls,
            a_function,  # type: Callable
            function_kwargs=None,  # type: Optional[Dict[str, Any]]
            function_input_artifacts=None,  # type: Optional[Dict[str, str]]
            function_return=None,  # type: Optional[List[str]]
            project_name=None,  # type: Optional[str]
            task_name=None,  # type: Optional[str]
            task_type=None,  # type: Optional[str]
            auto_connect_frameworks=None,  # type: Optional[dict]
            auto_connect_arg_parser=None,  # type: Optional[dict]
            repo=None,  # type: Optional[str]
            branch=None,  # type: Optional[str]
            commit=None,  # type: Optional[str]
            packages=None,  # type: Optional[Union[str, Sequence[str]]]
            docker=None,  # type: Optional[str]
            docker_args=None,  # type: Optional[str]
            docker_bash_setup_script=None,  # type: Optional[str]
            output_uri=None,  # type: Optional[str]
            helper_functions=None,  # type: Optional[Sequence[Callable]]
            dry_run=False,  # type: bool
            _sanitize_function=None,  # type: Optional[Callable[[str], str]]
            _sanitize_helper_functions=None,  # type: Optional[Callable[[str], str]]
    ):
        # type: (...) -> Optional[Union[Dict, Task]]
        """
        Create a Task from a function, including wrapping the function input arguments
        into the hyper-parameter section as kwargs, and storing function results as named artifacts

        Example:
            def mock_func(a=6, b=9):
                c = a*b
                print(a, b, c)
                return c, c**2

            create_task_from_function(mock_func, function_return=['mul', 'square'])

        Example arguments from other Tasks (artifact):
            def mock_func(matrix_np):
                c = matrix_np*matrix_np
                print(matrix_np, c)
                return c

            create_task_from_function(
                mock_func,
                function_input_artifacts={'matrix_np': 'aabb1122.previous_matrix'},
                function_return=['square_matrix']
            )

        :param a_function: A global function to convert into a standalone Task
        :param function_kwargs: Optional, provide subset of function arguments and default values to expose.
            If not provided automatically take all function arguments & defaults
        :param function_input_artifacts: Optional, pass input arguments to the function from
            other Tasks's output artifact.
            Example argument named `numpy_matrix` from Task ID `aabbcc` artifact name `answer`:
            {'numpy_matrix': 'aabbcc.answer'}
        :param function_return: Provide a list of names for all the results.
            If not provided no results will be stored as artifacts.
        :param project_name: Set the project name for the task. Required if base_task_id is None.
        :param task_name: Set the name of the remote task. Required if base_task_id is None.
        :param task_type: Optional, The task type to be created. Supported values: 'training', 'testing', 'inference',
            'data_processing', 'application', 'monitor', 'controller', 'optimizer', 'service', 'qc', 'custom'
        :param auto_connect_frameworks: Control the frameworks auto connect, see `Task.init` auto_connect_frameworks
        :param auto_connect_arg_parser: Control the ArgParser auto connect, see `Task.init` auto_connect_arg_parser
        :param repo: Remote URL for the repository to use, OR path to local copy of the git repository
            Example: 'https://github.com/allegroai/clearml.git' or '~/project/repo'
        :param branch: Select specific repository branch/tag (implies the latest commit from the branch)
        :param commit: Select specific commit id to use (default: latest commit,
            or when used with local repository matching the local commit id)
        :param packages: Manually specify a list of required packages or a local requirements.txt file.
            Example: ["tqdm>=2.1", "scikit-learn"] or "./requirements.txt"
            If not provided, packages are automatically added based on the imports used in the function.
        :param docker: Select the docker image to be executed in by the remote session
        :param docker_args: Add docker arguments, pass a single string
        :param docker_bash_setup_script: Add bash script to be executed
            inside the docker before setting up the Task's environment
        :param output_uri: Optional, set the Tasks's output_uri (Storage destination).
            examples: 's3://bucket/folder', 'https://server/' , 'gs://bucket/folder', 'azure://bucket', '/folder/'
        :param helper_functions: Optional, a list of helper functions to make available
            for the standalone function Task.
        :param dry_run: If True do not create the Task, but return a dict of the Task's definitions
        :param _sanitize_function: Sanitization function for the function string.
        :param _sanitize_helper_functions: Sanitization function for the helper function string.
        :return: Newly created Task object
        """
        assert (not auto_connect_frameworks
                or isinstance(auto_connect_frameworks, (bool, dict)))
        assert (not auto_connect_arg_parser
                or isinstance(auto_connect_arg_parser, (bool, dict)))

        function_name = str(a_function.__name__)
        function_source = inspect.getsource(a_function)
        if _sanitize_function:
            function_source = _sanitize_function(function_source)
        function_source = cls.__sanitize_remove_type_hints(function_source)

        # add helper functions on top.
        for f in (helper_functions or []):
            f_source = inspect.getsource(f)
            if _sanitize_helper_functions:
                f_source = _sanitize_helper_functions(f_source)
            function_source = cls.__sanitize_remove_type_hints(
                f_source) + '\n\n' + function_source

        function_input_artifacts = function_input_artifacts or dict()
        # verify artifact kwargs:
        if not all(
                len(v.split('.', 1)) == 2
                for v in function_input_artifacts.values()):
            raise ValueError(
                'function_input_artifacts={}, it must be in the format: '
                '{{"argument": "task_id.artifact_name"}}'.format(
                    function_input_artifacts))
        inspect_args = None
        function_kwargs_types = dict()
        if function_kwargs is None:
            function_kwargs = dict()
            inspect_args = inspect.getfullargspec(a_function)
            if inspect_args and inspect_args.args:
                inspect_defaults_vals = inspect_args.defaults
                inspect_defaults_args = inspect_args.args

                # adjust the defaults so they match the args (match from the end)
                if inspect_defaults_vals and len(inspect_defaults_vals) != len(
                        inspect_defaults_args):
                    inspect_defaults_args = inspect_defaults_args[
                        -len(inspect_defaults_vals):]

                if inspect_defaults_vals and len(inspect_defaults_vals) != len(
                        inspect_defaults_args):
                    getLogger().warning(
                        'Ignoring default argument values: '
                        'could not find all default values for: \'{}\''.format(
                            function_name))
                    inspect_defaults_vals = []

                function_kwargs = {str(k): v for k, v in zip(inspect_defaults_args, inspect_defaults_vals)} \
                    if inspect_defaults_vals else {str(k): None for k in inspect_defaults_args}
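                # e.g. for def f(a, b=1, c=2): args=['a', 'b', 'c'], defaults=(1, 2)
                # -> function_kwargs == {'b': 1, 'c': 2}; with no defaults, every arg maps to None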

        if function_kwargs:
            if not inspect_args:
                inspect_args = inspect.getfullargspec(a_function)
            # inspect_func.annotations[k]
            if inspect_args.annotations:
                supported_types = _Arguments.get_supported_types()
                function_kwargs_types = {
                    str(k): str(inspect_args.annotations[k].__name__)
                    for k in inspect_args.annotations
                    if inspect_args.annotations[k] in supported_types
                }
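                # e.g. def f(x: int = 1, y: str = 'a') yields {'x': 'int', 'y': 'str'},
                # provided int and str are among the supported types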

        task_template = cls.task_template.format(
            auto_connect_frameworks=auto_connect_frameworks,
            auto_connect_arg_parser=auto_connect_arg_parser,
            kwargs_section=cls.kwargs_section,
            input_artifact_section=cls.input_artifact_section,
            function_source=function_source,
            function_kwargs=function_kwargs,
            function_input_artifacts=function_input_artifacts,
            function_name=function_name,
            function_return=function_return)
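        # the rendered template (cls.task_template, defined elsewhere on the class) is a standalone
        # script embedding the function/helper sources plus the kwargs and input-artifact wiring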

        temp_dir = repo if repo and os.path.isdir(repo) else None
        with tempfile.NamedTemporaryFile('w', suffix='.py',
                                         dir=temp_dir) as temp_file:
            temp_file.write(task_template)
            temp_file.flush()

            requirements_file = None
            if packages and not isinstance(
                    packages, (list, tuple)) and Path(packages).is_file():
                requirements_file = packages
                packages = False
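            # e.g. packages='./requirements.txt' is forwarded as requirements_file, and packages
            # is set to False so the file, not import auto-detection, drives the requirements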

            populate = CreateAndPopulate(
                project_name=project_name,
                task_name=task_name or str(function_name),
                task_type=task_type,
                script=temp_file.name,
                packages=packages if packages is not None else True,
                requirements_file=requirements_file,
                repo=repo,
                branch=branch,
                commit=commit,
                docker=docker,
                docker_args=docker_args,
                docker_bash_setup_script=docker_bash_setup_script,
                output_uri=output_uri,
                add_task_init_call=False,
            )
            entry_point = '{}.py'.format(function_name)
            task = populate.create_task(dry_run=dry_run)

            if dry_run:
                task['script']['diff'] = task_template
                task['script']['entry_point'] = entry_point
                task['script']['working_dir'] = '.'
                task['hyperparams'] = {
                    cls.kwargs_section: {
                        k: dict(section=cls.kwargs_section,
                                name=k,
                                value=str(v) if v is not None else '',
                                type=function_kwargs_types.get(k, None))
                        for k, v in (function_kwargs or {}).items()
                    },
                    cls.input_artifact_section: {
                        k: dict(section=cls.input_artifact_section,
                                name=k,
                                value=str(v) if v is not None else '')
                        for k, v in (function_input_artifacts or {}).items()
                    }
                }
            else:
                # dict.update() returns None, so build the updated script dict first
                script_data = task.data.script.to_dict()
                script_data.update({
                    'entry_point': entry_point,
                    'working_dir': '.',
                    'diff': task_template
                })
                task.update_task(task_data={'script': script_data})
                hyper_parameters = {'{}/{}'.format(cls.kwargs_section, k): str(v)
                                    for k, v in function_kwargs.items()} if function_kwargs else {}
                hyper_parameters.update({
                    '{}/{}'.format(cls.input_artifact_section, k): str(v)
                    for k, v in function_input_artifacts.items()
                } if function_input_artifacts else {})
                __function_kwargs_types = \
                    {'{}/{}'.format(cls.kwargs_section, k): v for k, v in function_kwargs_types.items()} \
                    if function_kwargs_types else None
                task.set_parameters(hyper_parameters,
                                    __parameters_types=__function_kwargs_types)

            return task