Example #1
    def test_storage_simple_save(self):
        """
        Make sure that saving to remote locations actually works
        """
        def task(name, local, remote, cache_key):
            self.assertTrue(isinstance(local, FileSystemStorage))
            self.assertTrue(isinstance(remote, FileSystemStorage))
            remote.save(name, local.open(name))

        def delay(*args, **kwargs):
            task(*args, **kwargs)

        task.delay = delay

        storage = QueuedStorage(
            local=FileSystemStorage(location=self.local_dir),
            remote=FileSystemStorage(location=self.remote_dir), task=task)

        field = TestModel._meta.get_field('file')
        field.storage = storage

        obj = TestModel(file=File(self.test_file))
        obj.save()

        self.assertTrue(path.isfile(path.join(self.local_dir, obj.file.name)))
        self.assertTrue(path.isfile(path.join(self.remote_dir, obj.file.name)))
Example #2
def load_data():
    """Loads movie_data, cust_data, and answers from pickles.

    Returns:
        The tuple (movie_data, cust_data, answers) with the objects loaded from
        their pickles.
    """
    # load movie data cache
    if isfile(CACHE_LOC + MOVIE_PICKLE):
        with open(CACHE_LOC + MOVIE_PICKLE, 'rb') as movie_file:
            movie_data = load(movie_file)
    else:
        movie_data = loads(urlopen(CACHE_URL + MOVIE_PICKLE).read())
    # load customer data cache
    if isfile(CACHE_LOC + CUSTOMER_PICKLE):
        with open(CACHE_LOC + CUSTOMER_PICKLE, 'rb') as cust_file:
            cust_data = load(cust_file)
    else:
        cust_data = loads(urlopen(CACHE_URL + CUSTOMER_PICKLE).read())
    # load answers
    if isfile(CACHE_LOC + ANSWER_PICKLE):
        with open(CACHE_LOC + ANSWER_PICKLE, 'rb') as answer_file:
            answers = load(answer_file)
    else:
        answers = loads(urlopen(CACHE_URL + ANSWER_PICKLE).read())
    return movie_data, cust_data, answers
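Example #2 relies on module-level imports and constants that the snippet does not show. A minimal, hypothetical sketch of that preamble (the paths and URL below are placeholders, not the project's real values); note that unpickling data fetched over HTTP executes arbitrary code, so this pattern is only safe with a trusted source:

from os.path import isfile
from pickle import load, loads
from urllib.request import urlopen   # urllib2.urlopen on Python 2

# Placeholder locations; the original project defines its own.
CACHE_LOC = 'cache/'
CACHE_URL = 'http://example.com/cache/'
MOVIE_PICKLE = 'movie_data.pkl'
CUSTOMER_PICKLE = 'cust_data.pkl'
ANSWER_PICKLE = 'answers.pkl'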
Example #3
def main(max_stations=0, folder='.'):
    try:
        makedirs(output_folder+'/'+folder)
    except OSError:
        pass

    all_files = [ f for f in listdir(data_folder) if isfile(join(data_folder,f)) and f.endswith('.gz') ]
    
    for ndf in all_files:
        string = '_%dstations' % max_stations
        new_name=ndf[:-7]+string+ndf[-7:]
        rename(data_folder+'/'+ndf, data_folder+'/'+new_name)
        
    all_files = [ f for f in listdir(data_folder) if isfile(join(data_folder,f)) and f.endswith('.gz') ]
    
    for a_f in all_files:
        move(data_folder+'/'+a_f, output_folder+'/'+folder+'/'+a_f)
        print "Moved:", a_f[0:-3]
        
    data_files = [ f for f in listdir(output_folder+'/'+folder) if isfile(join(output_folder+'/'+folder,f)) and f.endswith('.dat.gz') ]

    print "\n"

    for d_f in data_files:
        fin = gzip.open(output_folder+'/'+folder+'/'+d_f, 'rb')
        data = fin.read()
        fin.close()

        with open(output_folder+'/'+folder+'/'+d_f[0:-3],'w') as fout:
            fout.write(data)

        print "Unzipped:", d_f[0:-3]
Example #4
def loadPlugin(package, filename, *args):
	modName = "%s.%s" % (__name__, path.basename(filename))
	
	# Search for full filename
	fullPath = path.join(_ROOT, package, filename)
	if not path.isfile(fullPath):
		fullPath = fullPath + ".py"
	if not path.isfile(fullPath):
		raise Exception("Unable to find module: %s" % fullPath)

	try:
		# Load file
		logging.debug("Loading module '%s' at: %s" % (modName, fullPath))
		module = imp.load_source(__name__, fullPath)

		if not module:
			raise Exception('Error loading module source')
		
		# Create instance using `create`
		logging.debug("Creating instance of module '%s'" % modName)
		inst = module.create(*args)

		# Validate
		if not inst:
			raise Exception("Create did not return a valid instance")
		if len(inst.__class__.__bases__) == 0:
			logging.warning("Class '%s' does not inherit from base class", modName)

		return inst
	except Exception as e:
		logging.error("Error loading module: %s" % e)
		raise Exception("Error loading module: %s" % e)
Example #5
def check_local_config_schema(build):
	log.info("Verifying your configuration settings...")
	# leave this import here: might not be on sys.path in some situations
	import validictory

	local_conf_filename = build.tool_config.get('general.local_config')
	if local_conf_filename is not None:
		# explicit conf file defined
		if not path.isfile(local_conf_filename):
			raise ConfigurationError("{file} does not exist!".format(file=local_conf_filename))
	else:
		local_conf_filename = 'local_config.json'
		if not path.isfile(local_conf_filename):
			log.warning("Local configuration file '{file}' does not exist!".format(file=local_conf_filename))
	
	with open(local_conf_filename) as local_conf_file:
		local_conf = json.load(local_conf_file)

	from forge.remote import Remote
	from forge import build_config
	remote = Remote(build_config.load())
	local_conf_schema = remote._api_get('platform/{platform_version}/local_config_schema'.format(
			platform_version=build.config['platform_version']))
	
	try:
		validictory.validate(local_conf, local_conf_schema)
	except validictory.validator.UnexpectedPropertyError as e:
		log.warning('Unexpected setting: "{error}" in "{file}". This will be ignored.'.format(
			file=local_conf_filename,
			error=e)
		)
	log.info("Configuration settings check complete")
Example #6
    def test_delayed_storage(self):
        storage = QueuedStorage(
            local='django.core.files.storage.FileSystemStorage',
            remote='django.core.files.storage.FileSystemStorage',
            local_options=dict(location=self.local_dir),
            remote_options=dict(location=self.remote_dir),
            delayed=True)

        field = TestModel._meta.get_field('file')
        field.storage = storage

        obj = TestModel(file=File(self.test_file))
        obj.save()

        self.assertIsNone(getattr(obj.file.storage, 'result', None))

        self.assertFalse(
            path.isfile(path.join(self.remote_dir, obj.file.name)),
            "Remote file should not be transferred automatically.")

        result = obj.file.storage.transfer(obj.file.name)
        result.get()

        self.assertTrue(
            path.isfile(path.join(self.remote_dir, obj.file.name)),
            "Remote file is not available.")
Example #7
def initializeInitD(ownerName):
    if (os.path.isdir(initdDirName)):
        fn = join(RANGER_USERSYNC_HOME, initdProgramName)
        initdFn = join(initdDirName, initdProgramName)
        shutil.copy(fn, initdFn)
        if (ownerName != 'ranger'):
            f = open(initdFn, 'r')
            filedata = f.read()
            f.close()
            find_str = "LINUX_USER=ranger"
            replace_str = "LINUX_USER="******"/etc/rc2.d", "/etc/rc3.d", "/etc/rc.d/rc2.d", "/etc/rc.d/rc3.d"]
        for rcDir in rcDirList:
            if (os.path.isdir(rcDir)):
                for prefix in initPrefixList:
                    scriptFn = prefix + initdProgramName
                    scriptName = join(rcDir, scriptFn)
                    if isfile(scriptName) or os.path.islink(scriptName):
                        os.remove(scriptName)
                    os.symlink(initdFn, scriptName)
        userSyncScriptName = "ranger-usersync-services.sh"
        localScriptName = os.path.abspath(join(RANGER_USERSYNC_HOME, userSyncScriptName))
        ubinScriptName = join("/usr/bin", initdProgramName)
        if isfile(ubinScriptName) or os.path.islink(ubinScriptName):
            os.remove(ubinScriptName)
        os.symlink(localScriptName, ubinScriptName)
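Example #7 (an Apache Ranger usersync install helper) references several globals defined elsewhere in its setup script. A purely illustrative sketch of those names so the snippet reads on its own; the actual values belong to the Ranger installer:

import os
import shutil
from os.path import join, isfile

# Illustrative values only.
RANGER_USERSYNC_HOME = '/usr/lib/ranger-usersync'
initdDirName = '/etc/init.d'
initdProgramName = 'ranger-usersync'
initPrefixList = ['S99', 'K00']   # prefixes for the start/kill symlinks in rc*.d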
Example #8
def download_properties(root_dir):
  """ Get the download properties.

  First tries to find the properties file in the given root directory,
  and if not found there, tries in the Gerrit settings folder in the
  user's home directory.

  Returns a set of download properties, which may be empty.

  """
  p = {}
  local_prop = path.join(root_dir, LOCAL_PROPERTIES)
  if not path.isfile(local_prop):
    local_prop = path.join(GERRIT_HOME, LOCAL_PROPERTIES)
  if path.isfile(local_prop):
    try:
      with open(local_prop) as fd:
        for line in fd:
          if line.startswith('download.'):
            d = [e.strip() for e in line.split('=', 1)]
            name, url = d[0], d[1]
            p[name[len('download.'):]] = url
    except OSError:
      pass
  return p
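Example #8 reads lines of the form download.<name>=<url> from a local.properties file, first next to root_dir and then under the Gerrit home directory. A small hedged usage sketch, assuming the function above is in scope (file contents and paths below are made up):

from os import path

GERRIT_HOME = path.expanduser('~/.gerritcodereview')   # assumed fallback location
LOCAL_PROPERTIES = 'local.properties'

# A local.properties picked up from root_dir might contain:
#   download.GUAVA=http://example.com/guava.jar
#   download.GSON=http://example.com/gson.jar
props = download_properties('/path/to/project')
for name, url in props.items():
    print(name, url)   # e.g. "GUAVA http://example.com/guava.jar"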
Example #9
    def test_transfer_and_delete(self):
        """
        Make sure the TransferAndDelete task does what it says
        """
        storage = QueuedStorage(
            local='django.core.files.storage.FileSystemStorage',
            remote='django.core.files.storage.FileSystemStorage',
            local_options=dict(location=self.local_dir),
            remote_options=dict(location=self.remote_dir),
            task='queued_storage.tasks.TransferAndDelete')

        field = TestModel._meta.get_field('file')
        field.storage = storage

        obj = TestModel(file=File(self.test_file))
        obj.save()

        obj.file.storage.result.get()

        self.assertFalse(
            path.isfile(path.join(self.local_dir, obj.file.name)),
            "Local file is still available")
        self.assertTrue(
            path.isfile(path.join(self.remote_dir, obj.file.name)),
            "Remote file is not available.")
Example #10
def get_module_source(modname):
    """Try to find the source code for a module.

    Can return ('file', 'filename') in which case the source is in the given
    file, or ('string', 'source') which which case the source is the string.
    """
    if modname not in sys.modules:
        try:
            __import__(modname)
        except Exception as err:
            raise PycodeError('error importing %r' % modname, err)
    mod = sys.modules[modname]
    if hasattr(mod, '__loader__'):
        try:
            source = mod.__loader__.get_source(modname)
        except Exception as err:
            raise PycodeError('error getting source for %r' % modname, err)
        return 'string', source
    filename = getattr(mod, '__file__', None)
    if filename is None:
        raise PycodeError('no source found for module %r' % modname)
    filename = path.normpath(path.abspath(filename))
    lfilename = filename.lower()
    if lfilename.endswith('.pyo') or lfilename.endswith('.pyc'):
        filename = filename[:-1]
        if not path.isfile(filename) and path.isfile(filename + 'w'):
            filename += 'w'
    elif not (lfilename.endswith('.py') or lfilename.endswith('.pyw')):
        raise PycodeError('source is not a .py file: %r' % filename)
    if not path.isfile(filename):
        raise PycodeError('source file is not present: %r' % filename)
    return 'file', filename
Example #11
    def delete_certificate(self, certificate_id, hackathon):
        """Delete certificate by azureKey.id and hackathon

        Delete the hackathon-azureKey relationship first. If the auzreKey is not needed any more, delete the azureKey too

        :type certificate_id: int
        :param certificate_id: id of AzureKey

        :type hackathon: Hackathon
        :param hackathon: instance of Hackathon
        """
        # delete all hackathon-azureKey relationships first

        azure_key = AzureKey.objects(id=certificate_id).first()

        # if no relations left, delete the azureKey itself
        if azure_key in hackathon.azure_keys:
            try:
                if isfile(azure_key.cert_url):
                    os.remove(azure_key.cert_url)
                else:
                    self.storage.delete(azure_key.cert_url)
                if isfile(azure_key.pem_url):
                    os.remove(azure_key.pem_url)
                else:
                    self.storage.delete(azure_key.pem_url)
            except Exception as e:
                self.log.error(e)

            hackathon.azure_keys.remove(azure_key)
            hackathon.save()

        return ok(True)
Example #12
def nb_renderer(full_path):
    directory, base = split(full_path)
    cache_file = join(directory, '.%s.html' % base)
    if not current_app.config.get('DEBUG'):
        try:
            if isfile(cache_file) and getmtime(full_path) < getmtime(cache_file):
                current_app.logger.debug('Using Cache File %s' % cache_file)
                return raw_renderer(cache_file)
        except:
            current_app.logger.warn('There was an error reading from the cache file %s' % cache_file)

    ex = HTMLExporter(extra_loaders=[current_app.jinja_env.loader],
                      template_file='wakari_notebook.html')

    ex.environment.globals.update(current_app.jinja_env.globals)
    current_app.update_template_context(ex.environment.globals)
    ex.environment.globals.update(dirname=dirname(request.view_args['path']))

    output, _ = ex.from_filename(full_path)


    try:
        with open(cache_file, 'w') as fd:
            current_app.logger.debug('Writing Cache File %s' % cache_file)
            fd.write(output.encode(errors='replace'))
    except (OSError, IOError):
        current_app.logger.warn('There was an error writing to the cache file %s' % cache_file)
        try:
            if isfile(cache_file): os.unlink(cache_file)
        except OSError:
            current_app.logger.warn('There was an error removing the cache file %s' % cache_file)
            pass

    return output
Example #13
def get_recipes(filter):
    with open('support.json') as fi:
        supported = json.load(fi)
    result = 'result.txt'
    if isfile(result):
        with open(result) as fi:
            for line in fi:
                line = line.split()
                if(line[1] == "OK"):
                    OK.append(line[0])
    for dn in sorted(os.listdir('..')):
        if(dn not in OK):
            recipe_dir = join('..', dn)
            meta_path = join(recipe_dir, 'meta.yaml')
            if not isfile(meta_path):
                continue
            with open(meta_path) as fi:
                data = fi.read()
            name = yaml.load(data)['package']['name']
            if name not in supported:
                continue
            sl = supported[name]
            if sl == 'all':
                ALLOWED.append(name)
                yield recipe_dir, name
            else:
                assert isinstance(sl, list)
                if filter in sl:
                    ALLOWED.append(name)
                    yield recipe_dir, name
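Example #13 mutates OK and ALLOWED, which are module-level lists, and expects result.txt lines shaped like "<recipe> OK". A hedged sketch of the surrounding setup and a typical call:

import json
import os
from os.path import isfile, join
import yaml

OK = []        # recipes already marked OK in result.txt
ALLOWED = []   # recipes accepted for the requested platform filter

# Hypothetical usage: walk the recipes supported for a given platform label.
# for recipe_dir, name in get_recipes('linux-64'):
#     print(recipe_dir, name)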
Example #14
    def _get_feature(self, typename):
        fname_feature = 'features/feature_%s.npy' % typename
        fname_names = 'features/names_%s.pkl' % typename
        if path.isfile(fname_feature) and path.isfile(fname_names):
            feature = np.load(fname_feature)
            names = pickle.load(open(fname_names, 'rb'))
            return feature, names
Example #15
def test_create_structure(tmpfolder):
    struct = {"my_file": "Some content",
              "my_folder": {
                  "my_dir_file": "Some other content",
                  "empty_file": "",
                  "file_not_created": None
              },
              "empty_folder": {}}
    expected = {"my_file": "Some content",
                "my_folder": {
                    "my_dir_file": "Some other content",
                    "empty_file": ""
                },
                "empty_folder": {}}
    changed, _ = structure.create_structure(struct, {})

    assert changed == expected
    assert isdir("my_folder")
    assert isdir("empty_folder")
    assert isfile("my_folder/my_dir_file")
    assert isfile("my_folder/empty_file")
    assert not isfile("my_folder/file_not_created")
    assert isfile("my_file")
    assert open("my_file").read() == "Some content"
    assert open("my_folder/my_dir_file").read() == "Some other content"
    assert open("my_folder/empty_file").read() == ""
Example #16
def install():
    """Install the Python CGI"""

    # Update the Package Index
    local("sudo apt-get update")
    # Check pip
    try:
        pip_version = check_output("pip -V", shell=True)
    except CalledProcessError:
        # Install pip
        downloads_path = expanduser("~/Downloads")
        local("cd %s; wget https://bootstrap.pypa.io/get-pip.py" %
                downloads_path)
        local("cd %s; sudo python get-pip.py" % downloads_path)
        pip_path = join(downloads_path, "get-pip.py")
        if exists(pip_path) and isfile(pip_path):
            remove(pip_path)
    # Installing Apache 2
    local("echo \"Y\" | sudo apt-get install apache2")
    # Disable multithreading processes
    local("sudo a2dismod mpm_event")
    # Give Apache explicit permission to run scripts
    local("sudo a2enmod mpm_prefork cgi")
    # Configuration
    config_file_abs_path = "/etc/apache2/sites-enabled/000-default.conf"
    if exists(config_file_abs_path) and isfile(config_file_abs_path):
        try:
            with open(config_file_abs_path, 'r') as f:
                config_file_lines = f.readlines()
        except Exception as exception: # Python3 PermissionError
            error_code = exception.errno
            if error_code == EACCES: # 13
                print(messages["_error_NoRoot"])
                exit(1)
            else:
                print(messages["_error_Oops"] % strerror(error_code))
                exit(1)
    try:
        with open(config_file_abs_path, 'w') as f:
            for line in config_file_lines:
                f.write(line)
                line_strip = line.strip()
                if line_strip == "<VirtualHost *:80>":
                    f.write("\t<Directory /var/www/html>\n")
                    f.write("\t\tOptions +ExecCGI\n")
                    f.write("\t\tDirectoryIndex index.html\n")
                    f.write("\t</Directory>\n")
                    f.write("\tAddHandler cgi-script .py\n")
    except Exception as exception: # Python3 PermissionError
        error_code = exception.errno
        if error_code == EACCES: # 13
            print(messages["_error_NoRoot"])
            exit(1)
        else:
            print(messages["_error_Oops"] % strerror(error_code))
            exit(1)
    # Restart Apache
    local("sudo service apache2 restart")
    # Output
    print(messages["_installed"])
Example #17
        def get_npz(name):
            fname = 'npz_data/%s.npz' % name
            if self.use_saved_npz and path.isfile(fname):
                all_data = np.load(fname)
                # Each work contains many parts. Loop through each one.
                return [all_data[i] for i in all_data.files]

            music_file = self._get_path('data/', name + '.krn')
            if not path.isfile(music_file):
                music_file = music_file[:-3] + 'xml'
            if not path.isfile(music_file):
                raise Exception("Cannot find score for %s" % music_file[:-4])
            score = music21.converter.parse(music_file)
            all_arr = []
            for part in score.parts:
                arr = []
                for note in part.flat:
                    if isinstance(note, music21.note.Note):
                        elem = (note.ps, note.quarterLength)
                    elif isinstance(note, music21.note.Rest):
                        elem = (0.0, note.quarterLength)
                    else:
                        continue
                    arr.append(elem)
                all_arr.append(np.array(arr))
            if self.save_data:
                np.savez(fname, *all_arr)
            return all_arr
Example #18
def test_render_report():
    """Test rendering -*.fif files for mne report.
    """

    report = Report(info_fname=raw_fname)
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        report.parse_folder(data_path=base_dir)
    assert_true(len(w) == 1)

    # Check correct paths and filenames
    assert_true(raw_fname in report.fnames)
    assert_true(event_name in report.fnames)
    assert_true(report.data_path == base_dir)

    # Check if all files were rendered in the report
    fnames = glob.glob(op.join(base_dir, '*.fif'))
    bad_name = 'test_ctf_comp_raw-eve.fif'
    decrement = any(fname.endswith(bad_name) for fname in fnames)
    fnames = [fname for fname in fnames if
              fname.endswith(('-eve.fif', '-ave.fif', '-cov.fif',
                              '-sol.fif', '-fwd.fif', '-inv.fif',
                              '-src.fif', '-trans.fif', 'raw.fif',
                              'sss.fif', '-epo.fif')) and
              not fname.endswith(bad_name)]
    # last file above gets created by another test, and it shouldn't be there

    for fname in fnames:
        assert_true(''.join(report.html).find(op.basename(fname)) != -1)

    assert_equal(len(report.fnames), len(fnames))
    assert_equal(len(report.html), len(report.fnames))

    evoked1 = read_evokeds(evoked1_fname)
    evoked2 = read_evokeds(evoked2_fname)
    assert_equal(len(report.fnames) + len(evoked1) + len(evoked2) - 2,
                 report.initial_id - decrement)

    # Check saving functionality
    report.data_path = tempdir
    report.save(fname=op.join(tempdir, 'report.html'), open_browser=False)
    assert_true(op.isfile(op.join(tempdir, 'report.html')))

    # Check add_section functionality
    fig = evoked1[0].plot(show=False)
    report.add_section(figs=fig,  # test non-list input
                       captions=['evoked response'])
    assert_equal(len(report.html), len(fnames) + 1)
    assert_equal(len(report.html), len(report.fnames))
    assert_raises(ValueError, report.add_section, figs=[fig, fig],
                  captions='H')

    # Check saving same report to new filename
    report.save(fname=op.join(tempdir, 'report2.html'), open_browser=False)
    assert_true(op.isfile(op.join(tempdir, 'report2.html')))

    # Check overwriting file
    report.save(fname=op.join(tempdir, 'report.html'), open_browser=False,
                overwrite=True)
    assert_true(op.isfile(op.join(tempdir, 'report.html')))
Example #19
def main_loop(argv=None):
    if argv is None:
        argv = sys.argv

    args = docopt.docopt(get_updated_docstring(), argv=argv[1:],
            version=".".join(map(str, __version__)))

    if not args["--silent"]:
        logcfg.set_loglevel(log, "INFO")
        for h in log.handlers:
            logcfg.set_loglevel(h, "INFO")
    elif args["--verbose"] > 0:
        logcfg.make_verbose()
        log.debug(pf(args))

    ext = args["--extension"]
    recursive = args["--recursive"]

    files_and_folders = []
    files_and_folders.extend(args["<file_or_folder>"])

    for faf in files_and_folders:
        if osp.isfile(faf):
            parse_file(faf, args)
        elif osp.isdir(faf):
            for entry in os.listdir(faf):
                path = osp.join(faf, entry)

                valid_file = osp.isfile(path)\
                    and osp.splitext(path)[-1] == ext\
                    and osp.basename(osp.splitext(path)[0]) != "cfg"
                valid_folder = recursive and osp.isdir(path)

                if valid_file or valid_folder:
                    files_and_folders.append(path)
Example #20
def build_chm_doc(libname):
    """Return CHM documentation file (on Windows only), which is copied under 
    {PythonInstallDir}\Doc, hence allowing Spyder to add an entry for opening 
    package documentation in "Help" menu. This has no effect on a source 
    distribution."""
    args = ''.join(sys.argv)
    if os.name == 'nt' and ('bdist' in args or 'build' in args):
        try:
            import sphinx  # analysis:ignore
        except ImportError:
            print('Warning: `sphinx` is required to build documentation',
                  file=sys.stderr)
            return
        hhc_base = r'C:\Program Files%s\HTML Help Workshop\hhc.exe'
        for hhc_exe in (hhc_base % '', hhc_base % ' (x86)'):
            if osp.isfile(hhc_exe):
                break
        else:
            print('Warning: `HTML Help Workshop` is required to build CHM '\
                  'documentation file', file=sys.stderr)
            return
        doctmp_dir = 'doctmp'
        subprocess.call('sphinx-build -b htmlhelp doc %s' % doctmp_dir,
                        shell=True)
        atexit.register(shutil.rmtree, osp.abspath(doctmp_dir))
        fname = osp.abspath(osp.join(doctmp_dir, '%s.chm' % libname))
        subprocess.call('"%s" %s' % (hhc_exe, fname), shell=True)
        if osp.isfile(fname):
            return fname
        else:
            print('Warning: CHM building process failed', file=sys.stderr)
Example #21
    def crop(self, dimensions=None, links_to_create=None):
        if path.isfile(settings.USER_FILES_ROOT + self.full_filename()):
            return  # already done, skip

        if not path.isfile(settings.UPLOADED_FILES_ROOT + self.full_filename()):
            return  # source image doesn't exist, can't crop it

        if not links_to_create:
            links_to_create = []

        x = y = w = h = 0
        if dimensions:
            x = dimensions['x']
            y = dimensions['y']
            w = dimensions['w']
            h = dimensions['h']

        # Queue a job for the cropping/resizing gearman worker
        gm_client = libgearman.Client()
        for server in settings.GEARMAN_SERVERS:
            gm_client.add_server(server)

        workload = {'file_hash': self.filename, 'format': self.format,
                    'x': x, 'y': y, 'w': w, 'h': h, 'links': links_to_create}
        gm_client.do_background('cropresize', json.dumps(workload))
Example #22
 def build_specific(self, filenames):
     # type: (List[unicode]) -> None
     """Only rebuild as much as needed for changes in the *filenames*."""
     # bring the filenames to the canonical format, that is,
     # relative to the source directory and without source_suffix.
     dirlen = len(self.srcdir) + 1
     to_write = []
     suffixes = None  # type: Tuple[unicode]
     suffixes = tuple(self.config.source_suffix)  # type: ignore
     for filename in filenames:
         filename = path.normpath(path.abspath(filename))
         if not filename.startswith(self.srcdir):
             logger.warning(__('file %r given on command line is not under the '
                               'source directory, ignoring'), filename)
             continue
         if not (path.isfile(filename) or
                 any(path.isfile(filename + suffix) for suffix in suffixes)):
             logger.warning(__('file %r given on command line does not exist, '
                               'ignoring'), filename)
             continue
         filename = filename[dirlen:]
         for suffix in suffixes:
             if filename.endswith(suffix):
                 filename = filename[:-len(suffix)]
                 break
         filename = filename.replace(path.sep, SEP)
         to_write.append(filename)
     self.build(to_write, method='specific',
                summary=__('%d source files given on command line') % len(to_write))
Example #23
def process(filename, size=-1):
   file = out_dir + filename
   if path.isfile(file) and stat(file).st_size == size:
      print 'Skipping: ' + filename
      return

   print 'Processing: ' + filename
   handle = urlopen(base_url + filename)
   headers = handle.info()
   content_length = int(headers.getheader('Content-Length'))
   last_modified = mktime(strptime(headers.getheader('Last-Modified'), '%a, %d %b %Y %H:%M:%S %Z'))

   if rfind(filename, '/') > 0:
      dir = out_dir + filename[:rfind(filename, '/')]
   else:
      dir = out_dir

   if not path.isdir(dir):
      print 'Creating ' + dir
      makedirs(dir)

   if not path.isfile(file):
      download(filename, last_modified)
   else:
      file_stat = stat(file)
      if file_stat.st_mtime != last_modified or file_stat.st_size != content_length:
         download(filename, last_modified)
      else:
         print 'Skipping: ' + filename
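Example #23 calls a download(filename, last_modified) helper that is not shown. One plausible sketch, assuming it fetches base_url + filename into out_dir and stamps the file with the server's Last-Modified time (the example itself is Python 2; urllib.request stands in for urllib2 here):

from os import utime
from urllib.request import urlopen

out_dir = './mirror/'                    # placeholders; the example defines its own
base_url = 'http://example.com/pub/'

def download(filename, last_modified):
    """Hypothetical helper: fetch the remote file and set its mtime."""
    target = out_dir + filename
    with open(target, 'wb') as fh:
        fh.write(urlopen(base_url + filename).read())
    utime(target, (last_modified, last_modified))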
Example #24
def status(request, task_id):
    if request.method == 'GET':
        status_file_path = path.join(settings.MEDIA_ROOT, 'blast', 'task', task_id, 'status.json')
        status = {'status': 'unknown'}
        if path.isfile(status_file_path):
            with open(status_file_path, 'rb') as f:
                statusdata = json.load(f)
                if statusdata['status'] == 'pending' and settings.USE_CACHE:
                    tlist = cache.get('task_list_cache', [])
                    num_preceding = -1
                    if tlist:
                        for index, tuple in enumerate(tlist):
                            if task_id in tuple:
                                num_preceding = index 
                                break
                    statusdata['num_preceding'] = num_preceding
                elif statusdata['status'] == 'running':
                    asn_path = path.join(settings.MEDIA_ROOT, 'blast', 'task', task_id, (task_id+'.asn'))
                    if path.isfile(asn_path):
                        with open(asn_path, 'r') as asn_f:
                            astr = asn_f.read()
                            processed_seq_count = astr.count('title \"')
                            statusdata['processed'] = processed_seq_count
                    else:
                        statusdata['processed'] = 0
                return HttpResponse(json.dumps(statusdata))
        return HttpResponse(json.dumps(status))
    else:
        return HttpResponse('Invalid Post')
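The Django view in Example #24 only requires that status.json carry a "status" key ("pending", "running", and so on); num_preceding and processed are added by the view itself. A tiny hypothetical producer of such a file:

import json

# Written by the worker that runs the BLAST task (illustrative layout only).
with open('status.json', 'w') as f:
    json.dump({'status': 'pending'}, f)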
Example #25
 def is_fresh(self, key, depends):
     if self.force:
         return
     mtime_cache = self.mtime_cache
     if self.prereq:
         output = join(self.base_dir, key)
         if not isfile(output):
             self.prereq_data.pop(key, None)
             return
         for dep in depends:
             if newer(dep, output, mtime_cache):
                 self.prereq_data.pop(key, None)
                 return
         if newer(self.config_path, output, mtime_cache):
             self.prereq_data.pop(key, None)
             return
         return 1
     paths = self.output_data.get(key)
     if not paths:
         return
     output_dir = self.output_dir
     for output in paths:
         output = join(output_dir, output)
         if not isfile(output):
             self.output_data.pop(key)
             return
     output = join(output_dir, list(paths).pop())
     for dep in depends:
         if newer(dep, output, mtime_cache):
             self.output_data.pop(key)
             return
     if newer(self.config_path, output, mtime_cache):
         self.output_data.pop(key)
         return
     return 1
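Example #25 leans on a newer(source, target, mtime_cache) helper that is not part of the snippet. A minimal sketch under the assumption that it compares modification times and memoises os.stat calls in mtime_cache:

import os

def newer(source, target, mtime_cache):
    """Hypothetical: return True if source was modified after target."""
    def mtime(p):
        if p not in mtime_cache:
            mtime_cache[p] = os.stat(p).st_mtime
        return mtime_cache[p]
    return mtime(source) > mtime(target)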
Example #26
    def test_demo_html_normalized(self):
        """
        Ensure a demo.html in zip file is normalized to index.html when
        unpacked
        """

        fout = StringIO()
        zf = zipfile.ZipFile(fout, "w")
        zf.writestr("demo.html", """<html></html""")
        zf.writestr("css/main.css", "h1 { color: red }")
        zf.writestr("js/main.js", 'alert("HELLO WORLD");')
        zf.close()

        s = Submission(
            title="Hello world", slug="hello-world", description="This is a hello world demo", creator=self.user
        )

        s.demo_package.save("play_demo.zip", ContentFile(fout.getvalue()))
        s.demo_package.close()
        s.clean()
        s.save()

        s.process_demo_package()

        path = s.demo_package.path.replace(".zip", "")

        ok_(isdir(path))
        ok_(isfile("%s/index.html" % path))
        ok_(isfile("%s/css/main.css" % path))
        ok_(isfile("%s/js/main.js" % path))

        rmtree(path)
Example #27
    def test_demo_unicode_filenames(self):
        """Bug 741660: Demo package containing filenames with non-ASCII
        characters works"""

        fout = StringIO()
        zf = zipfile.ZipFile(fout, "w")
        zf.writestr("demo.html", """<html></html""")
        zf.writestr("css/예제.css", "h1 { color: red }")
        zf.writestr("js/示例.js", 'alert("HELLO WORLD");')
        zf.close()

        s = Submission(
            title="Hello world", slug="hello-world", description="This is a hello world demo", creator=self.user
        )

        s.demo_package.save("play_demo.zip", ContentFile(fout.getvalue()))
        s.demo_package.close()
        s.clean()
        s.save()

        s.process_demo_package()

        path = s.demo_package.path.replace(".zip", "")

        ok_(isdir(path))
        ok_(isfile((u"%s/index.html" % path).encode("utf-8")))
        ok_(isfile((u"%s/css/예제.css" % path).encode("utf-8")))
        ok_(isfile((u"%s/js/示例.js" % path).encode("utf-8")))

        rmtree(path)
Example #28
def dump_db(dumpfile="pootle_DB_backup.sql"):
    """Dumps the DB as a SQL script and downloads it"""
    require('environment', provided_by=[production, staging])

    if ((isfile(dumpfile) and confirm('\n%s already exists locally. Do you '
        'want to overwrite it?' % dumpfile, default=False))
        or not isfile(dumpfile)):

        remote_filename = '%s/%s' % (env['project_path'], dumpfile)

        if ((exists(remote_filename) and confirm('\n%s already exists. Do you '
            'want to overwrite it?' % remote_filename, default=False))
            or not exists(remote_filename)):

            print('\nDumping DB...')

            with settings(hide('stderr')):
                sudo('mysqldump -u %s -p %s > %s' % (env['db_user'],
                                                     env['db_name'],
                                                     remote_filename))
                get(remote_filename, '.')
        else:
            print('\nAborting.')
    else:
        print('\nAborting.')
Example #29
def mainFunc():
    parser = argparse.ArgumentParser(description='Run Elastix registration protocol for all images in the directory')
    parser.add_argument('--refDir', '-r', dest='refDir', required = True, \
    help='The directory containing the reference images.')
    parser.add_argument('--floatFile', '-f', dest='floatFile', required = True, \
    help='Path to the floating image.')
    parser.add_argument('--outDir', '-o', dest='outDir', required = False, \
    help='Path to store the output images/parameters (default: current dir)', default=os.getcwd())
    parser.add_argument('--atlas', '-a', dest='atlas', required = False, \
    help='Path to the atlas segmentation file which will be resampled with the CPP file from the registration.', default=None)

    args = parser.parse_args()

    refImgs = [join(args.refDir, File) for File in listdir(args.refDir)]
    refImgs = [img for img in refImgs if isfile(img) and img.endswith('.nii')]

    if not refImgs:
        print('Couldn\'t find any reference images')
        return

    if not path.isfile(args.floatFile):
        print('Coudln\'t find the float image')

    refImgs.sort(key=str.lower)

    refFloatPairs = [[refImg, args.floatFile] for refImg in refImgs]

    f3dParStr = paramListToShortString(f3d_params)
    aladinParStr = paramListToShortString(aladin_params)
    for rfPair in refFloatPairs:
        baseName = basename(rfPair[0])[:-4]+'_'+basename(rfPair[1])[:-4]
        currOutDir = join(args.outDir,baseName)
        mkdir(currOutDir)
        elastixLogPath = join(currOutDir,baseName+'_LOG.txt')
        elastixCommand = elastixExec+' -f '+rfPair[0]+' -m '+rfPair[1]+' -p '+' -p '.join(elastixParams)+' -o '+currOutDir
        elastixLog = ''
        try:
            elastixLog = ccall(elastixCommand, shell=True, stderr=STDOUT)
        except CalledProcessError as err:
            writeAndDie(err.output, elastixLogPath)
        with open(elastixLogPath, 'w') as f:
            f.write(elastixLog)

        transformParameterFiles = ['TransformParameters.0.txt', 'TransformParameters.1.txt']
        transformParameterFiles = [join(currOutDir,tpFile) for tpFile in transformParameterFiles]
        for tpFilePath in transformParameterFiles:
            with open(tpFilePath,'r') as tpFile:
                tpCont = tpFile.read()
            tpCont = tpCont.replace('(FinalBSplineInterpolationOrder 3)', '(FinalBSplineInterpolationOrder 1)')
            with open(tpFilePath,'w') as tpFile:
                tpFile.write(tpCont)

        if args.atlas is not None:
            atlasOutDir = join(currOutDir,'atlas')
            mkdir(atlasOutDir)
            trfxCmd = trfxExec+' -in '+args.atlas+' -out '+atlasOutDir+' -tp '+transformParameterFiles[-1]
            try:
                resampleLog = ccall(trfxCmd, shell=True, stderr=STDOUT)
            except CalledProcessError as err:
                writeAndDie(err.output, join(atlasOutDir,'ERR.txt'))
Example #30
def MODULE_LIST(force_compile=False):
    """Load scripts from scripts directory and return list of modules."""
    modules = []
    
    for search_path in [search_path for search_path in SCRIPT_SEARCH_PATHS if exists(search_path)]:
        to_compile = [file for file in os.listdir(search_path)
                      if file[-7:] == ".script" and file[0] != "_"
                      and ((not isfile(join(search_path, file[:-7] + '.py'))) or 
                           (isfile(join(search_path, file[:-7] + '.py')) and
                            (getmtime(join(search_path, file[:-7] + '.py')) < 
                             getmtime(join(search_path, file))))
                            or force_compile)
                          ]
        for script in to_compile:
            script_name = '.'.join(script.split('.')[:-1])
            compile_script(join(search_path, script_name))
    
        files = [file for file in os.listdir(search_path)
                 if file[-3:] == ".py" and file[0] != "_"
                 and '#retriever' in open(join(search_path, file), 'r').readline().lower()]
    
        for script in files:
            script_name = '.'.join(script.split('.')[:-1])
            file, pathname, desc = imp.find_module(script_name, [search_path])
            try:
                new_module = imp.load_module(script_name, file, pathname, desc)
                new_module.SCRIPT.download
                modules.append(new_module)
            except:
                sys.stderr.write("Failed to load script: %s (%s)" % (script_name, search_path))
    
    return modules
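Example #30 depends on SCRIPT_SEARCH_PATHS and a compile_script() helper defined elsewhere in its package. A hedged sketch of the module-level pieces it assumes, with placeholder paths and a stand-in compiler:

import imp
import os
import sys
from os.path import exists, getmtime, isfile, join

# Placeholder search paths; the real package computes these per platform.
SCRIPT_SEARCH_PATHS = ['scripts', os.path.expanduser('~/.retriever/scripts')]

def compile_script(script_path):
    """Hypothetical stand-in: turn <script_path>.script into <script_path>.py."""
    raise NotImplementedError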
Example #31
            else '07_{}_joint_angles.npy'.format(test_sequence_2)
        cmu_data = np.load(DATA_PATH + 'cmu_mocap/' + np_file)

        # Subsample test sequence.
        test_data_2 = cmu_data[::frame_subsamples]

        # Normalise data to zero mean and unit variance.
        y_test_2 = scaler.transform(test_data_2)

        # Define file path for results.
        dataset_str = 'cmu_subject7_training_seq_{}_test_seqs_{}_{}'.format(
            training_sequence, test_sequence_1, test_sequence_2)
        dp_gp_lvm_results_file = RESULTS_FILE_NAME.format(model='dp_gp_lvm', dataset=dataset_str)

        # Define instance of necessary model.
        if not isfile(dp_gp_lvm_results_file):
            # Reset default graph before building new model graph. This speeds up script.
            tf.reset_default_graph()
            np.random.seed(1)  # Random seed.
            # Define instance of DP-GP-LVM.
            model = dp_gp_lvm(y_train=y_train,
                              num_inducing_points=num_inducing_points,
                              num_latent_dims=num_latent_dimensions,
                              truncation_level=truncation_level,
                              mask_size=1)  # Treat each observed dimension as independent.

            model_training_objective = model.objective
            predict_lower_bound_1, x_mean_test_1, x_covar_test_1, test_log_likelihood_1 = \
                model.predict_new_latent_variables(y_test=y_test_1)
            model_test_objective_1 = tf.negative(predict_lower_bound_1)
            predict_lower_bound_2, x_mean_test_2, x_covar_test_2, test_log_likelihood_2 = \
Example #32
            str += "</{}>\n".format(value)
        else:
            str += " " * tabCount
            str += "<{}> {} </{}>\n".format(type, value, type)
    return str


if __name__ == "__main__":
    if len(sys.argv) != 2:
        print("Usage: parser.py path/file.jack")
        print("       parser.py path")
        exit(0)

    jackFilePath = sys.argv[1]

    if path.isfile(jackFilePath):
        fullPath = os.path.abspath(jackFilePath)
        filePath, fileName = os.path.split(fullPath)
        filePre, fileExt = os.path.splitext(fileName)
        if fileName.endswith(".jack"):
            output = main(fullPath)

            xmlPath = os.path.join(filePath, "_" + filePre + ".xml")
            with open(xmlPath, 'w') as xmlFile:
                xmlFile.write(output)

    elif path.isdir(jackFilePath):
        fullPath = os.path.abspath(jackFilePath)
        fileList = os.listdir(fullPath)
        for fileName in fileList:
            filePre, fileExt = os.path.splitext(fileName)
Example #33
            "handlers": ["mail_admins"],
            "level": "ERROR",
            "propagate": True,
        },
        "django.request.tastypie": {"level": "ERROR"},
        "administration": {"level": "DEBUG"},
        "common": {"level": "DEBUG"},
        "locations": {"level": "DEBUG"},
        "sword2": {"level": "INFO"},
        "boto3": {"level": "INFO"},
        "botocore": {"level": "INFO"},
    },
    "root": {"handlers": ["console"], "level": "WARNING"},
}

if isfile(LOGGING_CONFIG_FILE):
    with open(LOGGING_CONFIG_FILE, "rt") as f:
        LOGGING = logging.config.dictConfig(json.load(f))
else:
    logging.config.dictConfig(LOGGING)
# ######## END LOGGING CONFIGURATION


# ######## SESSION CONFIGURATION
# So the cookies don't conflict with archivematica cookies
SESSION_COOKIE_NAME = "storageapi_sessionid"
# ######## END SESSION CONFIGURATION


# ######## WSGI CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
Example #34
#++++++++++++++++++++++ MAIN ++++++++++++++++++++++++++++++++++++++++++++++
#++++++Sampling Frequency Assignment
if channel == 'Koerperschall':
	fs = 1000.0
elif channel == 'Drehmoment':
	fs = 1000.0
elif channel == 'AE_Signal':
	fs = 1000000.0
else:
	print('Error fs assignment')
dt = 1.0/fs


#++++++List of Files in Directory

filenames = [f for f in os.listdir(mypath) if isfile(join(mypath, f))]

features = [
# 'KURT_WFM_0',
# 'RMS_WFM_0',
# 'LP6_WFM_0',
# 'LP7_WFM_0',
# 'LP16_WFM_0',
# 'LP17_WFM_0',
# 'LP21_WFM_0',
# 'LP24_WFM_0',
# 'LP16_FFT_0',
# 'LP17_FFT_0',
# 'LP21_FFT_0',
# 'LP24_FFT_0',
'NBU_WFM_0',
Example #35
                else:
                    tk.messagebox.showinfo(title='提示', message='下载失败!')  # title 'Notice', message 'Download failed!'
                bar.quit()

            WorkThread(action=copyAnrTool).start()
            bar.start('更新软件','正在下载......')  # 'Updating software', 'Downloading......'

HEIGHT = 600
WIDTH = 800

if __name__ == '__main__':
    window = tk.Tk()
    window.title('Anr 工具')  # 'Anr Tool'
    window.resizable(0, 0)
    ico = sep.join(['res',"anr.ico"])
    if isfile(ico):
        window.iconbitmap(ico)
    sw = window.winfo_screenwidth()
    sh = window.winfo_screenheight()
    ww = WIDTH
    wh = HEIGHT
    x = (sw - ww) / 2
    y = (sh - wh) / 2
    window.geometry("%dx%d+%d+%d" % (ww, wh, x, y))

    bg = sep.join(['res','window_bg.png'])
    if isfile(bg):
        canvas = tk.Canvas(window, height=HEIGHT, width=WIDTH)
        image_file = tk.PhotoImage(file=bg)
        image = canvas.create_image(WIDTH / 2, HEIGHT / 2, anchor='center', image=image_file)  # anchor: n=north, s=south, w=west, e=east, center=middle
        canvas.pack(side='top')
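Example #35 starts a WorkThread(action=...) that is defined elsewhere in the tool. A plausible minimal sketch, assuming it is a thin wrapper around threading.Thread that runs the given callable in the background:

import threading

class WorkThread(threading.Thread):
    """Hypothetical helper: run `action` on a daemon thread."""
    def __init__(self, action):
        super().__init__(daemon=True)
        self._action = action

    def run(self):
        self._action()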
Example #36
def fmda_advance_region(cycle, cfg, rtma, wksp_path, lookback_length, meso_token):
    """
    Advance the fuel moisture estimates in the region specified by the configuration.
    The function assumes that the fuel moisture model has not been advanced to this
    cycle yet and will overwrite any previous computations.
    
    Control flow:
    
    1) read in RTMA variables
    2) check if there is a stored FM model for previous cycle
    2a) yes -> load it, advance one time-step, perform DA
    2b) no -> compute equilibrium, use background covariance to do DA
    3) store model
    
    :param cycle: the datetime indicating the processed cycle in UTC
    :param cfg: the configuration dictionary specifying the region
    :param rtma: the RTMA object that can be used to retrieve variables for this cycle
    :param wksp_path: the workspace path for the cycler
    :param lookback_length: number of cycles to search before we find a computed cycle
    :param meso_token: the mesowest API access token
    :return: the model advanced and assimilated at the current cycle
    """
    logging.info("rtma_cycler.fmda_advance_region: %s" % str(cycle))
    model = None
    prev_cycle = cycle - timedelta(hours=1)
    prev_model_path = compute_model_path(prev_cycle, cfg.code, wksp_path)
    if not osp.exists(prev_model_path):
        logging.info('CYCLER cannot find model from previous cycle %s' % str(prev_cycle))
        if lookback_length > 0:
            model = fmda_advance_region(cycle - timedelta(hours=1), cfg, rtma, wksp_path, lookback_length - 1, meso_token)
    else:
        logging.info('CYCLER found previous model for cycle %s.' % str(prev_cycle))
        model = FuelMoistureModel.from_netcdf(prev_model_path)
        
    # retrieve the variables and make sure they are available (we should not be here if they are not)
    try:
        dont_have_vars, have_vars = rtma.retrieve_rtma(cycle)
    except ValueError as e:
        logging.error(e)
        sys.exit(1) 
    assert not dont_have_vars
    
    logging.info('CYCLER loading RTMA data for cycle %s.' % str(cycle))
    TD, T2, RH, precipa, hgt, lats, lons = load_rtma_data(have_vars, cfg.bbox)
    Ed, Ew = compute_equilibria(T2, RH)

    rain = precipa[:,:] + 0
    # remove rain that is too small to make any difference 
    rain[rain < 0.01] = 0
    # remove bogus rain that is too large 
    rain[rain > 1e10] = 0

    dom_shape = T2.shape

    # store the lons/lats for this domain
    geo_path = osp.join(wksp_path, '%s-geo.nc' % cfg.code)
    if not osp.isfile(geo_path):
        logging.info('CYCLER initializing new file %s.' % (geo_path))
        d = netCDF4.Dataset(geo_path, 'w', format='NETCDF4')
        d.createDimension('south_north', dom_shape[0])
        d.createDimension('west_east', dom_shape[1])
        xlat = d.createVariable('XLAT', 'f4', ('south_north', 'west_east'))
        xlat[:,:] = lats
        xlong = d.createVariable('XLONG', 'f4', ('south_north', 'west_east'))
        xlong[:,:] = lons
        d.close()
    else:
        logging.info('CYCLER file already exists:  %s.' % (geo_path))
    
    
    # the process noise matrix
    Q = np.diag([1e-4,5e-5,1e-5,1e-6,1e-6])
    
    # background covariance
    P0 = np.diag([0.01,0.01,0.01,0.001,0.001])

    # check if we must start from equilibrium
    if model is None:
        logging.info('CYCLER initializing from equilibrium for cycle %s.' % (str(cycle)))
        # setup model parameters    
        Nk = 3
        Tk = np.array([1.0, 10.0, 100.0])
        m0 = np.expand_dims(0.5 * (Ed + Ew), axis=2)
        model = FuelMoistureModel(m0[:,:,[0,0,0]], Tk, P0)
    else:
        logging.info('CYCLER advancing model one hour to cycle %s.' % (str(cycle)))
        dt = 3600 # always 1 hr step in RTMA
        model.advance_model(Ed, Ew, rain, dt, Q)

    logging.info('CYCLER retrieving fm-10 observations for cycle %s.' % (str(cycle)))
    
    # perform assimilation with mesowest observations
    tm_start = cycle - timedelta(minutes=30)
    tm_end = cycle + timedelta(minutes=30)
    fm10 = retrieve_mesowest_observations(meso_token, tm_start, tm_end, lats, lons)
    fm10v = []
    for fm10_obs in fm10.values():
        for obs in fm10_obs:
            fm10v.append(obs.get_value())
    
    logging.info('CYCLER retrieved %d valid observations, min/mean/max [%g/%g/%g].' %
                 (len(fm10),np.amin(fm10v),np.mean(fm10v),np.amax(fm10v)))
    
    # run the data assimilation step
    covs = [np.ones(dom_shape), hgt / 2000.0]
    covs_names = ['const','hgt/2000']
    if np.any(rain > 0.01):
        covs.append(rain)
        covs_names.append('rain')
    execute_da_step(model, cycle, covs, covs_names, fm10)
    
    # make geogrid files for WPS; datasets and lines to add to GEOGRID.TBL
    geo_path = compute_model_path(cycle, cfg.code, wksp_path,ext="geo")
    index = rtma.geogrid_index()
    print('index',index)
    model.to_geogrid(geo_path,index,lats,lons)

    # store the new model  
    model_path = compute_model_path(cycle, cfg.code, wksp_path)
    logging.info('CYCLER writing model variables to:  %s.' % model_path)
    model.to_netcdf(ensure_dir(model_path),
        {'EQUILd FM':Ed,'EQUILw FM':Ew,'TD':TD,'T2':T2,'RH':RH,'PRECIPA':precipa,'PRECIP':rain,'HGT':hgt})
    
    return model
Example #37
    def __init__(self,
                 fieldset,
                 ptype,
                 pyfunc=None,
                 funcname=None,
                 funccode=None,
                 py_ast=None,
                 funcvars=None,
                 c_include=""):
        self.fieldset = fieldset
        self.ptype = ptype
        self._lib = None

        # Derive meta information from pyfunc, if not given
        self.funcname = funcname or pyfunc.__name__
        if pyfunc is AdvectionRK4_3D:
            warning = False
            if isinstance(fieldset.W, Field) and fieldset.W.creation_log != 'from_nemo' and \
               fieldset.W._scaling_factor is not None and fieldset.W._scaling_factor > 0:
                warning = True
            if type(fieldset.W) in [SummedField, NestedField]:
                for f in fieldset.W:
                    if f.creation_log != 'from_nemo' and f._scaling_factor is not None and f._scaling_factor > 0:
                        warning = True
            if warning:
                logger.warning_once(
                    'Note that in AdvectionRK4_3D, vertical velocity is assumed positive towards increasing z.\n'
                    '         If z increases downward and w is positive upward you can re-orient it downwards by setting fieldset.W.set_scaling_factor(-1.)'
                )
        if funcvars is not None:
            self.funcvars = funcvars
        elif hasattr(pyfunc, '__code__'):
            self.funcvars = list(pyfunc.__code__.co_varnames)
        else:
            self.funcvars = None
        self.funccode = funccode or inspect.getsource(pyfunc.__code__)
        # Parse AST if it is not provided explicitly
        self.py_ast = py_ast or parse(fix_indentation(self.funccode)).body[0]
        if pyfunc is None:
            # Extract user context by inspecting the call stack
            stack = inspect.stack()
            try:
                user_ctx = stack[-1][0].f_globals
                user_ctx['math'] = globals()['math']
                user_ctx['random'] = globals()['random']
                user_ctx['ErrorCode'] = globals()['ErrorCode']
            except:
                logger.warning(
                    "Could not access user context when merging kernels")
                user_ctx = globals()
            finally:
                del stack  # Remove cyclic references
            # Compile and generate Python function from AST
            py_mod = Module(body=[self.py_ast])
            exec(compile(py_mod, "<ast>", "exec"), user_ctx)
            self.pyfunc = user_ctx[self.funcname]
        else:
            self.pyfunc = pyfunc
        assert len(inspect.getargspec(self.pyfunc).args) == 3, \
            'Since Parcels v2.0, kernels do only take 3 arguments: particle, fieldset, time !! AND !! Argument order in field interpolation is time, depth, lat, lon.'

        self.name = "%s%s" % (ptype.name, self.funcname)

        # Generate the kernel function and add the outer loop
        if self.ptype.uses_jit:
            kernelgen = KernelGenerator(fieldset, ptype)
            kernel_ccode = kernelgen.generate(deepcopy(self.py_ast),
                                              self.funcvars)
            self.field_args = kernelgen.field_args
            self.vector_field_args = kernelgen.vector_field_args
            fieldset = self.fieldset
            for fname in self.vector_field_args:
                f = getattr(fieldset, fname)
                Wname = f.W.name if f.W else 'not_defined'
                for sF in [f.U.name, f.V.name, Wname]:
                    if sF not in self.field_args:
                        try:
                            self.field_args[sF] = getattr(fieldset, sF)
                        except:
                            continue
            self.const_args = kernelgen.const_args
            loopgen = LoopGenerator(fieldset, ptype)
            if path.isfile(c_include):
                with open(c_include, 'r') as f:
                    c_include_str = f.read()
            else:
                c_include_str = c_include
            self.ccode = loopgen.generate(self.funcname, self.field_args,
                                          self.const_args, kernel_ccode,
                                          c_include_str)

            basename = path.join(get_cache_dir(), self._cache_key)
            self.src_file = "%s.c" % basename
            self.lib_file = "%s.%s" % (basename,
                                       'dll' if platform == 'win32' else 'so')
            self.log_file = "%s.log" % basename
Example #38
def main(argv):
    """
    Main entry point for gcexport.py
    """
    args = parse_arguments(argv)
    if args.version:
        print(argv[0] + ", version " + SCRIPT_VERSION)
        exit(0)

    print('Welcome to Garmin Connect Exporter!')

    # Create directory for data files.
    if isdir(args.directory):
        print(
            'Warning: Output directory already exists. Will skip already-downloaded files and \
            append to the CSV file.')

    login_to_garmin_connect(args)

    # We should be logged in now.
    if not isdir(args.directory):
        mkdir(args.directory)

    csv_filename = args.directory + '/activities.csv'
    csv_existed = isfile(csv_filename)

    csv_file = open(csv_filename, 'a')

    # Write header to CSV file
    if not csv_existed:
        csv_file.write('Activity name,\
    Description,\
    Begin timestamp,\
    Duration (h:m:s),\
    Moving duration (h:m:s),\
    Distance (km),\
    Average speed (km/h or min/km),\
    Average moving speed (km/h or min/km),\
    Max. speed (km/h or min/km),\
    Elevation loss uncorrected (m),\
    Elevation gain uncorrected (m),\
    Elevation min. uncorrected (m),\
    Elevation max. uncorrected (m),\
    Min. heart rate (bpm),\
    Max. heart rate (bpm),\
    Average heart rate (bpm),\
    Calories,\
    Avg. cadence (rpm),\
    Max. cadence (rpm),\
    Strokes,\
    Avg. temp (°C),\
    Min. temp (°C),\
    Max. temp (°C),\
    Map,\
    End timestamp,\
    Begin timestamp (ms),\
    End timestamp (ms),\
    Device,\
    Activity type,\
    Event type,\
    Time zone,\
    Begin latitude (°DD),\
    Begin longitude (°DD),\
    End latitude (°DD),\
    End longitude (°DD),\
    Elevation gain corrected (m),\
    Elevation loss corrected (m),\
    Elevation max. corrected (m),\
    Elevation min. corrected (m),\
    Sample count\n')

    if args.count == 'all':
        # If the user wants to download all activities, first download one,
        # then the result of that request will tell us how many are available
        # so we will modify the variables then.
        print(
            "Making result summary request ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
        print(URL_GC_SEARCH)
        result = http_req(URL_GC_SEARCH)
        print(
            "Finished result summary request ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")

        # Persist JSON
        write_to_file(args.directory + '/activities-summary.json', result, 'a')

        # Modify total_to_download based on how many activities the server reports.
        json_results = json.loads(
            result)  # TODO: Catch possible exceptions here.
        total_to_download = int(json_results['results']['totalFound'])
    else:
        total_to_download = int(args.count)
    total_downloaded = 0

    device_dict = dict()

    # load some dictionaries with lookup data from REST services
    activity_type_props = http_req(URL_GC_ACT_PROPS)
    # write_to_file(args.directory + '/activity_types.properties', activity_type_props, 'a')
    activity_type_name = load_properties(activity_type_props)
    event_type_props = http_req(URL_GC_EVT_PROPS)
    # write_to_file(args.directory + '/event_types.properties', event_type_props, 'a')
    event_type_name = load_properties(event_type_props)

    # This while loop will download data from the server in multiple chunks, if necessary.
    while total_downloaded < total_to_download:
        # Maximum chunk size 'LIMIT_MAXIMUM' ... 400 return status if over maximum.  So download
        # maximum or whatever remains if less than maximum.
        # As of 2018-03-06 I get return status 500 if over maximum
        if total_to_download - total_downloaded > LIMIT_MAXIMUM:
            num_to_download = LIMIT_MAXIMUM
        else:
            num_to_download = total_to_download - total_downloaded

        search_params = {'start': total_downloaded, 'limit': num_to_download}
        # Query Garmin Connect
        print("Making activity request ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
        print(URL_GC_LIST + urlencode(search_params))
        result = http_req(URL_GC_LIST + urlencode(search_params))
        print(
            "Finished activity request ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")

        # Persist JSON
        write_to_file(args.directory + '/activities.json', result, 'a')

        json_results = json.loads(
            result)  # TODO: Catch possible exceptions here.

        # search = json_results['results']['search']

        # Pull out just the list of activities.
        activities = json_results

        # Process each activity.
        for a in activities:
            # Display which entry we're working on.
            print('Garmin Connect activity: [' + str(a['activityId']) + '] ',
                  end='')
            print(a['activityName'])

            # Retrieve also the detail data from the activity (the one displayed on
            # the https://connect.garmin.com/modern/activity/xxx page), because some
            # data are missing from 'a' (or are even different, e.g. for my activities
            # 86497297 or 86516281)
            activity_details = None
            details = None
            tries = MAX_TRIES
            while tries > 0:
                activity_details = http_req(URL_GC_ACTIVITY +
                                            str(a['activityId']))
                details = json.loads(activity_details)
                # I observed a failure to get a complete JSON detail in about 5-10 calls out of 1000
                # retrying then statistically gets a better JSON ;-)
                if len(details['summaryDTO']) > 0:
                    tries = 0
                else:
                    print('retrying for ' + str(a['activityId']))
                    tries -= 1
                    if tries == 0:
                        raise Exception('Didn\'t get "summaryDTO" after ' +
                                        str(MAX_TRIES) + ' tries for ' +
                                        str(a['activityId']))

            parent_type_id = 4 if absent_or_null(
                'activityType', a) else a['activityType']['parentTypeId']
            type_id = 4 if absent_or_null('activityType',
                                          a) else a['activityType']['typeId']

            start_time_with_offset = offset_date_time(a['startTimeLocal'],
                                                      a['startTimeGMT'])
            elapsed_duration = details['summaryDTO'][
                'elapsedDuration'] if 'summaryDTO' in details and 'elapsedDuration' in details[
                    'summaryDTO'] else None
            duration = elapsed_duration if elapsed_duration else a['duration']
            duration_seconds = int(round(duration))
            end_time_with_offset = start_time_with_offset + timedelta(
                seconds=duration_seconds) if duration else None

            # get some values from detail if present, from a otherwise
            start_latitude = from_activities_or_detail('startLatitude', a,
                                                       details, 'summaryDTO')
            start_longitude = from_activities_or_detail(
                'startLongitude', a, details, 'summaryDTO')
            end_latitude = from_activities_or_detail('endLatitude', a, details,
                                                     'summaryDTO')
            end_longitude = from_activities_or_detail('endLongitude', a,
                                                      details, 'summaryDTO')

            print('\t' + start_time_with_offset.isoformat() + ', ', end='')
            if 'duration' in a:
                print(hhmmss_from_seconds(a['duration']) + ', ', end='')
            else:
                print('??:??:??, ', end='')
            if 'distance' in a and isinstance(a['distance'], (float)):
                print("{0:.3f}".format(a['distance'] / 1000) + 'km')
            else:
                print('0.000 km')

            # try to get the device details (and cache them, as they're used for multiple activities)
            device = None
            device_app_inst_id = None if absent_or_null(
                'metadataDTO', details
            ) else details['metadataDTO']['deviceApplicationInstallationId']
            if device_app_inst_id:
                if not device_dict.has_key(device_app_inst_id):
                    # print '\tGetting device details ' + str(device_app_inst_id)
                    device_details = http_req(URL_GC_DEVICE +
                                              str(device_app_inst_id))
                    write_to_file(
                        args.directory + '/device_' + str(device_app_inst_id) +
                        '.json', device_details, 'a')
                    device_dict[
                        device_app_inst_id] = None if not device_details else json.loads(
                            device_details)
                device = device_dict[device_app_inst_id]

            # Write stats to CSV.
            empty_record = '"",'
            csv_record = ''

            csv_record += empty_record if absent_or_null(
                'activityName',
                a) else '"' + a['activityName'].replace('"', '""') + '",'
            csv_record += empty_record if absent_or_null(
                'description',
                a) else '"' + a['description'].replace('"', '""') + '",'
            csv_record += '"' + start_time_with_offset.strftime(
                ALMOST_RFC_1123) + '",'
            # csv_record += '"' + startTimeWithOffset.isoformat() + '",'
            csv_record += empty_record if not duration else hhmmss_from_seconds(
                round(duration)) + ','
            csv_record += empty_record if absent_or_null(
                'movingDuration',
                details['summaryDTO']) else hhmmss_from_seconds(
                    details['summaryDTO']['movingDuration']) + ','
            csv_record += empty_record if absent_or_null(
                'distance',
                a) else '"' + "{0:.5f}".format(a['distance'] / 1000) + '",'
            csv_record += empty_record if absent_or_null(
                'averageSpeed', a) else '"' + trunc6(
                    pace_or_speed_raw(type_id, parent_type_id,
                                      a['averageSpeed'])) + '",'
            csv_record += empty_record if absent_or_null(
                'averageMovingSpeed', details['summaryDTO']) else '"' + trunc6(
                    pace_or_speed_raw(
                        type_id, parent_type_id,
                        details['summaryDTO']['averageMovingSpeed'])) + '",'
            csv_record += empty_record if absent_or_null(
                'maxSpeed', details['summaryDTO']) else '"' + trunc6(
                    pace_or_speed_raw(
                        type_id, parent_type_id,
                        details['summaryDTO']['maxSpeed'])) + '",'
            csv_record += empty_record if a[
                'elevationCorrected'] or absent_or_null(
                    'elevationLoss', details['summaryDTO']) else '"' + str(
                        round(details['summaryDTO']['elevationLoss'],
                              2)) + '",'
            csv_record += empty_record if a[
                'elevationCorrected'] or absent_or_null(
                    'elevationGain', details['summaryDTO']) else '"' + str(
                        round(details['summaryDTO']['elevationGain'],
                              2)) + '",'
            csv_record += empty_record if a[
                'elevationCorrected'] or absent_or_null(
                    'minElevation', details['summaryDTO']) else '"' + str(
                        round(details['summaryDTO']['minElevation'], 2)) + '",'
            csv_record += empty_record if a[
                'elevationCorrected'] or absent_or_null(
                    'maxElevation', details['summaryDTO']) else '"' + str(
                        round(details['summaryDTO']['maxElevation'], 2)) + '",'
            csv_record += empty_record  # no minimum heart rate in JSON
            csv_record += empty_record if absent_or_null(
                'maxHR', a) else '"' + "{0:.0f}".format(a['maxHR']) + '",'
            csv_record += empty_record if absent_or_null(
                'averageHR',
                a) else '"' + "{0:.0f}".format(a['averageHR']) + '",'
            csv_record += empty_record if absent_or_null(
                'calories', details['summaryDTO']) else '"' + str(
                    details['summaryDTO']['calories']) + '",'
            csv_record += empty_record if absent_or_null(
                'averageBikingCadenceInRevPerMinute', a) else '"' + str(
                    a['averageBikingCadenceInRevPerMinute']) + '",'
            csv_record += empty_record if absent_or_null(
                'maxBikingCadenceInRevPerMinute',
                a) else '"' + str(a['maxBikingCadenceInRevPerMinute']) + '",'
            csv_record += empty_record if absent_or_null(
                'strokes', a) else '"' + str(a['strokes']) + '",'
            csv_record += empty_record if absent_or_null(
                'averageTemperature', details['summaryDTO']) else '"' + str(
                    details['summaryDTO']['averageTemperature']) + '",'
            csv_record += empty_record if absent_or_null(
                'minTemperature', details['summaryDTO']) else '"' + str(
                    details['summaryDTO']['minTemperature']) + '",'
            csv_record += empty_record if absent_or_null(
                'maxTemperature', details['summaryDTO']) else '"' + str(
                    details['summaryDTO']['maxTemperature']) + '",'
            csv_record += '"https://connect.garmin.com/modern/activity/' + str(
                a['activityId']) + '",'
            csv_record += empty_record if not end_time_with_offset else '"' + end_time_with_offset.strftime(
                ALMOST_RFC_1123) + '",'
            csv_record += empty_record if not start_time_with_offset else '"' + start_time_with_offset.isoformat(
            ) + '",'
            # csv_record += empty_record if absent_or_null('beginTimestamp', a) else '"' + str(a['beginTimestamp']) + '",'
            csv_record += empty_record if not end_time_with_offset else '"' + end_time_with_offset.isoformat(
            ) + '",'
            # csv_record += empty_record if absent_or_null('beginTimestamp', a) else '"' + str(a['beginTimestamp']+durationSeconds*1000) + '",'
            csv_record += empty_record if absent_or_null(
                'productDisplayName',
                device) else '"' + device['productDisplayName'].replace(
                    '"', '""') + ' ' + device['versionString'] + '",'
            csv_record += empty_record if absent_or_null(
                'activityType', a) else '"' + value_if_found_else_key(
                    activity_type_name, 'activity_type_' +
                    a['activityType']['typeKey']) + '",'
            csv_record += empty_record if absent_or_null(
                'eventType', a) else '"' + value_if_found_else_key(
                    event_type_name, a['eventType']['typeKey']) + '",'
            csv_record += '"' + start_time_with_offset.isoformat()[-6:] + '",'
            csv_record += empty_record if not start_latitude else '"' + trunc6(
                start_latitude) + '",'
            csv_record += empty_record if not start_longitude else '"' + trunc6(
                start_longitude) + '",'
            csv_record += empty_record if not end_latitude else '"' + trunc6(
                end_latitude) + '",'
            csv_record += empty_record if not end_longitude else '"' + trunc6(
                end_longitude) + '",'
            csv_record += empty_record if not a[
                'elevationCorrected'] or absent_or_null(
                    'elevationGain', details['summaryDTO']) else '"' + str(
                        round(details['summaryDTO']['elevationGain'],
                              2)) + '",'
            csv_record += empty_record if not a[
                'elevationCorrected'] or absent_or_null(
                    'elevationLoss', details['summaryDTO']) else '"' + str(
                        round(details['summaryDTO']['elevationLoss'],
                              2)) + '",'
            csv_record += empty_record if not a[
                'elevationCorrected'] or absent_or_null(
                    'maxElevation', details['summaryDTO']) else '"' + str(
                        round(details['summaryDTO']['maxElevation'], 2)) + '",'
            csv_record += empty_record if not a[
                'elevationCorrected'] or absent_or_null(
                    'minElevation', details['summaryDTO']) else '"' + str(
                        round(details['summaryDTO']['minElevation'], 2)) + '",'
            csv_record += '""'  # no Sample Count in JSON
            csv_record += '\n'

            csv_file.write(csv_record.encode('utf8'))

            export_data_file(str(a['activityId']), activity_details, args)

        total_downloaded += num_to_download
    # End while loop for multiple chunks.

    csv_file.close()

    print('Open CSV output.')
    print(csv_filename)
    # open CSV file. Comment this line out if you don't want this behavior
    # call(["/usr/bin/libreoffice6.0", "--calc", csv_filename])

    print('Done!')
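The chunked-download loop above (the `while total_downloaded < total_to_download` block) is a plain start/limit pagination pattern. Below is a minimal sketch of that pattern detached from the Garmin Connect specifics; `fetch_page` is a hypothetical stand-in for the `http_req` + `json.loads` pair, and the 100-item default stands in for `LIMIT_MAXIMUM`:

def fetch_all(fetch_page, total_to_download, limit_maximum=100):
    """Fetch `total_to_download` items in chunks of at most `limit_maximum`."""
    items = []
    total_downloaded = 0
    while total_downloaded < total_to_download:
        # Request the maximum chunk, or whatever remains if that is smaller.
        num_to_download = min(limit_maximum,
                              total_to_download - total_downloaded)
        items.extend(fetch_page(start=total_downloaded, limit=num_to_download))
        total_downloaded += num_to_download
    return items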
Example #39
0
def add_mim_extension():
    """Add extra files that are required to support MIM into the package.

    These files will be added by creating a symlink to the originals if the
    package is installed in `editable` mode (e.g. pip install -e .), or by
    copying from the originals otherwise.
    """

    # parse installment mode
    if 'develop' in sys.argv:
        # installed by `pip install -e .`
        if platform.system() == 'Windows':
            # set `copy` mode here since symlink fails on Windows.
            mode = 'copy'
        else:
            mode = 'symlink'
    elif 'sdist' in sys.argv or 'bdist_wheel' in sys.argv or \
            platform.system() == 'Windows':
        # installed by `pip install .`
        # or create source distribution by `python setup.py sdist`
        # set `copy` mode here since symlink fails with WinError on Windows.
        mode = 'copy'
    else:
        return

    filenames = ['tools', 'configs', 'model-index.yml']
    repo_path = osp.dirname(__file__)
    mim_path = osp.join(repo_path, 'mmseg', '.mim')
    os.makedirs(mim_path, exist_ok=True)

    for filename in filenames:
        if osp.exists(filename):
            src_path = osp.join(repo_path, filename)
            tar_path = osp.join(mim_path, filename)

            if osp.isfile(tar_path) or osp.islink(tar_path):
                os.remove(tar_path)
            elif osp.isdir(tar_path):
                shutil.rmtree(tar_path)

            if mode == 'symlink':
                src_relpath = osp.relpath(src_path, osp.dirname(tar_path))
                try:
                    os.symlink(src_relpath, tar_path)
                except OSError:
                    # Creating a symbolic link on windows may raise an
                    # `OSError: [WinError 1314]` due to privilege. If
                    # the error happens, the src file will be copied
                    mode = 'copy'
                    warnings.warn(
                        f'Failed to create a symbolic link for {src_relpath}, '
                        f'and it will be copied to {tar_path}')
                else:
                    continue

            if mode == 'copy':
                if osp.isfile(src_path):
                    shutil.copyfile(src_path, tar_path)
                elif osp.isdir(src_path):
                    shutil.copytree(src_path, tar_path)
                else:
                    warnings.warn(f'Cannot copy file {src_path}.')
            else:
                raise ValueError(f'Invalid mode {mode}')
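The docstring of `add_mim_extension` above describes the core idea: symlink the files for editable installs and fall back to copying when symlinking is not possible (e.g. WinError 1314 on Windows). A minimal, hedged sketch of that symlink-with-copy-fallback idiom on its own, using a hypothetical `link_or_copy` helper:

import os
import os.path as osp
import shutil


def link_or_copy(src_path, tar_path):
    """Try a relative symlink first; copy the file or directory if that fails."""
    src_relpath = osp.relpath(src_path, osp.dirname(tar_path))
    try:
        os.symlink(src_relpath, tar_path)
    except OSError:
        # e.g. missing symlink privilege on Windows
        if osp.isdir(src_path):
            shutil.copytree(src_path, tar_path)
        else:
            shutil.copyfile(src_path, tar_path)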
Example #40
0
def export_data_file(activity_id, activity_details, args):
    if args.format == 'gpx':
        data_filename = args.directory + '/activity_' + activity_id + '.gpx'
        download_url = URL_GC_GPX_ACTIVITY + activity_id + '?full=true'
        # download_url = URL_GC_GPX_ACTIVITY + activity_id + '?full=true' + '&original=true'
        print(download_url)
        file_mode = 'w'
    elif args.format == 'tcx':
        data_filename = args.directory + '/activity_' + activity_id + '.tcx'
        download_url = URL_GC_TCX_ACTIVITY + activity_id + '?full=true'
        file_mode = 'w'
    elif args.format == 'original':
        data_filename = args.directory + '/activity_' + activity_id + '.zip'
        fit_filename = args.directory + '/' + activity_id + '.fit'
        download_url = URL_GC_ORIGINAL_ACTIVITY + activity_id
        file_mode = 'wb'
    elif args.format == 'json':
        data_filename = args.directory + '/activity_' + activity_id + '.json'
        file_mode = 'w'
    else:
        raise Exception('Unrecognized format.')

    if isfile(data_filename):
        print('\tData file already exists; skipping...')
        return

    # Regardless of unzip setting, don't redownload if the ZIP or FIT file exists.
    if args.format == 'original' and isfile(fit_filename):
        print('\tFIT data file already exists; skipping...')
        return

    if args.format != 'json':
        # Download the data file from Garmin Connect. If the download fails (e.g., due to timeout),
        # this script will die, but nothing will have been written to disk about this activity, so
        # just running it again should pick up where it left off.
        print('\tDownloading file...')

        try:
            data = http_req(download_url)
        except urllib2.HTTPError as e:
            # Handle expected (though unfortunate) error codes; die on unexpected ones.
            if e.code == 500 and args.format == 'tcx':
                # Garmin will give an internal server error (HTTP 500) when downloading TCX files
                # if the original was a manual GPX upload. Writing an empty file prevents this file
                # from being redownloaded, similar to the way GPX files are saved even when there
                # are no tracks. One could be generated here, but that's a bit much. Use the GPX
                # format if you want actual data in every file, as I believe Garmin provides a GPX
                # file for every activity.
                print('Writing empty file since Garmin did not generate a '
                      'TCX file for this activity...')
                data = ''
            elif e.code == 404 and args.format == 'original':
                # For manual activities (i.e., entered online without a file upload), there is
                # no original file. Write an empty file to prevent redownloading it.
                print(
                    'Writing empty file since there was no original activity data...'
                )
                data = ''
            else:
                raise Exception('Failed. Got an unexpected HTTP error (' +
                                str(e.code) + download_url + ').')
    else:
        data = activity_details

    # Persist file
    write_to_file(data_filename, data, file_mode)
    if args.format == 'gpx' and data:
        # Validate GPX data. If we have an activity without GPS data (e.g., running on a
        # treadmill). Garmin Connect still kicks out a GPX (sometimes), but there is only activity
        # information, no GPS data. N.B. You can omit the XML parse (and the associated log
        # messages) to speed things up.
        gpx = parseString(data)
        gpx_data_exists = len(gpx.getElementsByTagName('trkpt')) > 0

        if gpx_data_exists:
            print('Done. GPX data saved.')
        else:
            print('Done. No track points found.')
    elif args.format == 'original':
        # Even manual upload of a GPX file is zipped, but we'll validate the extension.
        if args.unzip and data_filename[-3:].lower() == 'zip':
            print("Unzipping and removing original files...")
            print('Filesize is: ' + str(stat(data_filename).st_size))
            if stat(data_filename).st_size > 0:
                zip_file = open(data_filename, 'rb')
                z = zipfile.ZipFile(zip_file)
                for name in z.namelist():
                    z.extract(name, args.directory)
                zip_file.close()
            else:
                print('Skipping 0Kb zip file.')
            remove(data_filename)
        print('Done.')
    elif args.format == 'json':
        # print nothing here
        pass
    else:
        # TODO: Consider validating other formats.
        print('Done.')
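The comments in `export_data_file` above explain the error-handling policy: for HTTP errors that are expected (500 for the TCX of a manual GPX upload, 404 when there is no original file), an empty file is written so the activity is not re-downloaded on the next run; anything else is fatal. A minimal sketch of that policy, written against Python 3's urllib (the example itself uses urllib2) with a hypothetical `download_or_placeholder` helper:

import urllib.error
import urllib.request


def download_or_placeholder(url, expected_codes=(404, 500)):
    """Return the response body, or b'' for expected HTTP error codes.

    Persisting the empty payload as a marker file is what prevents the
    same activity from being requested again on the next run.
    """
    try:
        with urllib.request.urlopen(url) as resp:
            return resp.read()
    except urllib.error.HTTPError as err:
        if err.code in expected_codes:
            return b''
        raise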
def make_tagged_corpus(perashki_dir=settings.PERASHKI_UNTAGGED_DIR, perashki_tagged_dir=settings.PERASHKI_TAGGED_DIR):
    perashki = [(join(perashki_dir, f), join(perashki_tagged_dir, f)) for f in listdir(perashki_dir) if isfile(join(perashki_dir, f))]
    morph = pymorphy2.MorphAnalyzer()
    
    for perashok_file, perashok_tagged_file in perashki:
        with open(perashok_file, encoding='utf-8') as input_stream, open(perashok_tagged_file, 'w', encoding='utf-8') as output_stream:
            for line in input_stream.readlines()[2:-1]:
                words = line.split()
                words_tagged = []
                for word in words:
                    words_tagged.append('{}:{}:{}'.format(word, get_lemma(word, morph), get_pos(word, morph)))
                print(*words_tagged, file=output_stream)
from keras import models
from keras.utils import np_utils
from keras.datasets import mnist
import keras as keras
import keras.backend as K
import numpy as np
from os import listdir
from os.path import isfile, join
import sys

path = sys.argv[1]

X_train = []
Y_train = []

onlyfiles = [f for f in listdir(path) if isfile(join(path, f))]

print("Loading data...")

for file in onlyfiles:
    file = join(path, file)
    f = open(file, "r", encoding='latin-1')

    line = f.readline()

    while line:
        try:
            splitted = line.replace("(", "").replace(")", "") \
                  .replace("[", "").replace("]", "") \
                  .replace('\n', "").replace(" ", "").split(",")
Example #43
0
 def _exists(self) -> bool:
     return isfile(self._filepath)
import numpy as np
from os import listdir, makedirs
from os.path import isdir, isfile, join

folder = 'dataset/'
new_folder = 'processed_dataset/'
index = 5

TEST_DATA = []

def squash_arrays(files, path):
	arrays = [np.load(f) for f in files]
	res = np.concatenate(arrays)
	np.random.shuffle(res)

	TEST_DATA.extend(res[-10:])
	res = res[:-10]
	
	np.save(path, res)
	del arrays
	del res


if __name__ == '__main__':
	if not isdir(new_folder):
		makedirs(new_folder)
	npy_files = [join(folder, f) for f in listdir(folder) if isfile(join(folder, f))]
	while index <= len(npy_files) or index == 5:
		files = npy_files[index - 5: index]
		path = join(new_folder, str(index // 5))
		squash_arrays(files, path)
		index += 5

	np.save(join(new_folder, 'test.npy'), TEST_DATA)
Example #45
0
    Searches for all files matching a particular glob pattern, extracts the given slice as an
    integer, and makes sure it is in the list of allowed numbers. If not, the file is deleted.
    """
    from glob import iglob
    from os import unlink
    from os.path import basename, join, isfile

    files = ((f, basename(f)[match_slice]) for f in iglob(join(d, pattern))
             if isfile(f))
    for f in (f for f, x in files if x.isdigit() and int(x) not in allowed):
        try:
            unlink(f)
        except Exception:
            pass
    files = ((f, basename(f)[match_slice])
             for f in iglob(join(d, '.' + pattern)) if isfile(f))
    for f in (f for f, x in files if x.isdigit() and int(x) not in allowed):
        try:
            unlink(f)
        except Exception:
            pass


def get_terminal_width():
    """Gets the width of the terminal if there is a terminal, in which case 80 is returned."""
    from os import environ
    from sys import platform

    if platform == "win32":
        # Windows
        from ctypes import windll, c_short, c_ushort, c_int, c_uint, c_void_p, byref, Structure
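The docstring at the top of Example #45 describes the cleanup logic: take each file matching the glob pattern, slice its basename with `match_slice`, and delete the file unless that slice is a digit string whose integer value is in `allowed`. A small, purely hypothetical illustration of how the slice is meant to line up with the file names:

from os.path import basename

# hypothetical files named like "frame_0001.png": the counter occupies
# characters 6..10 of the basename
match_slice = slice(6, 10)
allowed = {1, 2, 3}

name = basename('/tmp/frames/frame_0001.png')
number = name[match_slice]                      # -> '0001'
keep = number.isdigit() and int(number) in allowed
print(number, keep)                             # 0001 True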
Example #46
0
    def initGui(self):

        # Setup menu
        self.quickosm_menu = QMenu('Quick OSM')
        self.quickosm_menu.setIcon(QIcon(':/plugins/QuickOSM/icon.png'))
        self.dock_menu = QMenu(tr('QuickOSM', u'Dock'))
        self.vector_menu = self.iface.vectorMenu()
        self.vector_menu.addMenu(self.quickosm_menu)

        # Main window
        self.mainWindowAction = QAction(
            QIcon(':/plugins/QuickOSM/icon.png'),
            u'QuickOSM',
            self.iface.mainWindow())
        # noinspection PyUnresolvedReferences
        self.mainWindowAction.triggered.connect(self.openMainWindow)
        self.toolbar.addAction(self.mainWindowAction)
        self.iface.QuickOSM_mainWindowDialog = MainWindowDialog()

        # OSM File
        self.osmFileAction = QAction(
            QIcon(':/plugins/QuickOSM/resources/open.png'),
            tr('ui_osm_file', 'OSM File'),
            self.iface.mainWindow())
        # noinspection PyUnresolvedReferences
        self.osmFileAction.triggered.connect(self.openOsmFileDockWidget)
        self.osmFileDockWidget = OsmFileDockWidget()
        self.iface.addDockWidget(
            Qt.RightDockWidgetArea, self.osmFileDockWidget)
        self.osmFileDockWidget.hide()
        self.osmFileDockWidget.setObjectName('osmFileWidget')

        # My queries
        self.myQueriesAction = QAction(
            QIcon(':/plugins/QuickOSM/resources/favorites.png'),
            tr('ui_my_queries', 'My queries'),
            self.iface.mainWindow())
        # noinspection PyUnresolvedReferences
        self.myQueriesAction.triggered.connect(self.openMyQueriesDockWidget)
        self.myQueriesDockWidget = MyQueriesDockWidget()
        self.iface.addDockWidget(
            Qt.RightDockWidgetArea, self.myQueriesDockWidget)
        self.myQueriesDockWidget.hide()
        self.myQueriesDockWidget.setObjectName('myQueriesWidget')

        # Query
        self.queryAction = QAction(
            QIcon(':/plugins/QuickOSM/resources/edit.png'),
            tr('ui_query', 'Query'),
            self.iface.mainWindow())
        # noinspection PyUnresolvedReferences
        self.queryAction.triggered.connect(self.openQueryDockWidget)
        self.queryDockWidget = QueryDockWidget()
        self.iface.addDockWidget(Qt.RightDockWidgetArea, self.queryDockWidget)
        self.queryDockWidget.hide()
        self.queryDockWidget.setObjectName('queryWidget')

        # Quick query
        self.quickQueryAction = QAction(
            QIcon(':/plugins/QuickOSM/resources/quick.png'),
            tr('ui_quick_query', 'Quick query'),
            self.iface.mainWindow())
        # noinspection PyUnresolvedReferences
        self.quickQueryAction.triggered.connect(self.openQuickQueryDockWidget)
        self.quickQueryDockWidget = QuickQueryDockWidget()
        self.iface.addDockWidget(
            Qt.RightDockWidgetArea, self.quickQueryDockWidget)
        self.quickQueryDockWidget.hide()
        self.quickQueryDockWidget.setObjectName('quickQueryWidget')

        # Insert in the good order
        self.quickosm_menu.addAction(self.mainWindowAction)
        self.quickosm_menu.addMenu(self.dock_menu)
        self.dock_menu.addAction(self.quickQueryAction)
        self.dock_menu.addAction(self.queryAction)
        self.dock_menu.addAction(self.myQueriesAction)
        self.dock_menu.addAction(self.osmFileAction)

        # Connect signals and slots from dock
        self.queryDockWidget.signal_new_query_successful.connect(
            self.iface.QuickOSM_mainWindowDialog.refresh_my_queries_tree)
        self.queryDockWidget.signal_new_query_successful.connect(
            self.myQueriesDockWidget.refresh_my_queries_tree)
        self.myQueriesDockWidget.signal_delete_query_successful.connect(
            self.myQueriesDockWidget.refresh_my_queries_tree)
        self.myQueriesDockWidget.signal_delete_query_successful.connect(
            self.iface.QuickOSM_mainWindowDialog.refresh_my_queries_tree)

        # Connect signals and slots from mainWindow
        self.iface.QuickOSM_mainWindowDialog.signal_new_query_successful.\
            connect(self.myQueriesDockWidget.refresh_my_queries_tree)
        self.iface.QuickOSM_mainWindowDialog.signal_new_query_successful.\
            connect(
                self.iface.QuickOSM_mainWindowDialog.refresh_my_queries_tree)
        self.iface.QuickOSM_mainWindowDialog.signal_delete_query_successful.\
            connect(self.myQueriesDockWidget.refresh_my_queries_tree)
        self.iface.QuickOSM_mainWindowDialog.signal_delete_query_successful.\
            connect(
                self.iface.QuickOSM_mainWindowDialog.refresh_my_queries_tree)

        # Read the config file
        json_file_config = join(dirname(abspath(__file__)), 'config.json')
        if isfile(json_file_config):
            config_json = load(open(json_file_config))
            for server in config_json['overpass_servers']:
                self.iface.QuickOSM_mainWindowDialog.comboBox_default_OAPI.\
                    addItem(server)

        # Check previous version and if new queries are available
        version = get_setting('version')
        current_version = get_current_version()
        if version != current_version:
            if new_queries_available():
                message = 'New queries are available in the plugin. Would ' \
                          'you like to install them? This will overwrite ' \
                          'the current default queries.'
                title = 'QuickOSM'
                message = tr('QuickOSM', message)
                widget = self.iface.messageBar().createMessage(title, message)
                button_ok = QPushButton(widget)
                button_ok.setText(
                    tr('QuickOSM', 'Install'))
                button_ok.pressed.connect(self.restoreDefaultQueries)
                widget.layout().addWidget(button_ok)
                self.iface.messageBar().pushWidget(
                    widget, QgsMessageBar.INFO, 0)

            set_setting('version', current_version)
Example #47
0
def create_summary_evaluationmetrics(destdir):
    '''Creates a new HDF file storing some metrics for all predictions
    (HDF files) found inside `destdir` and subfolders

    :param destdir: a destination directory **whose FILE SUBTREE STRUCTURE
        MUST HAVE BEEN CREATED BY `run_evaluation`** or (if calling from the
        script file) `evaluate.py`: a list of scikit model files with
        associated directories (the model name without the extension
        '.sklmodel') storing each prediction run on HDF datasets.
    '''
    print('Computing summary evaluation metrics from '
          'predictions data frames (HDF file)')

    eval_df_path = join(destdir, 'evaluationmetrics.hdf')

    dfr, already_processed_tuples = None, set()
    if isfile(eval_df_path):
        dfr = pd.read_hdf(eval_df_path)
        already_processed_tuples = set(dfr._key)

    def build_key(clfdir, testname):
        return join(basename(clfdir), testname)

    # a set is faster than a dataframe for searching already processed
    # couples of (clf, testset_hdf):
    newrows = []
    clfs_prediction_paths = []
    for clfname in [] if not isdir(destdir) else listdir(destdir):
        clfdir, ext = splitext(clfname)
        if ext != '.sklmodel':
            continue
        clfdir = join(destdir, clfdir)
        for testname in [] if not isdir(clfdir) else listdir(clfdir):
            if build_key(clfdir, testname) in already_processed_tuples:
                continue
            clfs_prediction_paths.append((clfdir, testname))

    print('%d new prediction(s) found' % len(clfs_prediction_paths))
    if clfs_prediction_paths:
        errors = []
        pool = Pool(processes=int(cpu_count()))
        with click.progressbar(length=len(clfs_prediction_paths),
                               fill_char='o', empty_char='.') as pbar:
            for clfdir, testname, dic in \
                    pool.imap_unordered(_get_summary_evaluationmetrics_mp,
                                        clfs_prediction_paths):
                pbar.update(1)
                if isinstance(dic, Exception):
                    errors.append(dic)
                else:
                    dic['_key'] = build_key(clfdir, testname)
                    newrows.append(dic)

        if newrows:
            new_df = pd.DataFrame(data=newrows)
            if dfr is None:
                dfr = new_df
            else:
                dfr = pd.concat([dfr, new_df], axis=0, copy=True,
                                sort=False, ignore_index=True)
#                 dfr = dfr.append(new_df,
#                                  ignore_index=True,
#                                  verify_integrity=False,
#                                  sort=False).reset_index(drop=True)
            save_df(dfr, eval_df_path)

        if errors:
            print('%d prediction(s) discarded due to error' % len(errors))
            print('(possible cause: only one class found in the prediction)')
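`create_summary_evaluationmetrics` above fans the per-prediction work out to a `multiprocessing.Pool` with `imap_unordered`, drives a `click` progress bar from the main process, and collects per-item exceptions instead of aborting the whole run. A minimal sketch of that pattern with a hypothetical `worker` (which must be a module-level function so it can be pickled) that returns either a result or an exception object:

from multiprocessing import Pool, cpu_count

import click


def process_all(worker, jobs):
    """Run `worker` over `jobs` in parallel, separating results from errors."""
    results, errors = [], []
    with Pool(processes=int(cpu_count())) as pool:
        with click.progressbar(length=len(jobs),
                               fill_char='o', empty_char='.') as pbar:
            for outcome in pool.imap_unordered(worker, jobs):
                pbar.update(1)
                # the worker is assumed to return exceptions rather than raise
                if isinstance(outcome, Exception):
                    errors.append(outcome)
                else:
                    results.append(outcome)
    return results, errors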
Example #48
0
def file_checker(top, itp, suff):
    """This function checks for .itp (not containing 'posre' or '/') included in .top and try to
    look for them in the current directory or in the directory name of the .top file given by -p.\n
    Check if .itp/.top files have an [ atoms ] block to modifiy, else remove it from input files.\n
    Finally, builds output names for the topologies"""

    if top != []:
        print(f'Working on following .top file: {top}\n')
        regxitp = re.compile(r'(?<=#include ")(?!.*(\b\/\b|posre)).*(?=")')
        with open(top[0], 'r') as intop:
            for line in intop:
                if regxitp.search(line):
                    if regxitp.search(line).group() not in itp:
                        itp.append(regxitp.search(line).group())

        # Check if those .itp are in current directory or in the dirname of .top given by -p
        for i in itp:
            if not path.isfile(i):
                dirname_of_top = path.dirname(top[0])
                print(
                    f"{i} included in .top not found in './', "
                    f"let's look in dirname of the -p argument (i.e. {dirname_of_top})"
                )
                guess_itp_path = f'{dirname_of_top}/{i}'
                if path.isfile(guess_itp_path):
                    print(f'{i} found in {dirname_of_top}')
                    itp[itp.index(i)] = guess_itp_path
                else:
                    print(
                        f"{i} included in .top also not present in {dirname_of_top}:"
                        f" exiting script")
                    raise SystemExit()

    topols = top + itp

    regx_atmblock = re.compile(r'^\[ atoms \]\n')
    topol2rm = []

    for i in topols:
        keep = 0
        with open(i, 'r') as topfile:
            for line in topfile:
                if regx_atmblock.search(line):
                    keep = 1
        if keep == 0:
            print(f'No [ atoms ] block in {i}, this file will be ignored')
            topol2rm.append(i)

    topols = [topfile for topfile in topols if topfile not in topol2rm]

    print(
        f'\nThe following .itp and/or .top will be used for generating heavy H topologies:'
        f'\n{topols}\n')

    topolout = [
        re.sub(r'(\.itp|\.top)', '', path.basename(name)) + suff + '.' +
        name.split('.')[-1] for name in topols
    ]

    topolin_topolout = {topols[i]: topolout[i] for i in range(len(topols))}

    return topolin_topolout
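The regular expression in `file_checker` above does most of the work: a lookbehind anchors the match right after `#include "`, a negative lookahead rejects anything containing a '/' or 'posre', and a final lookahead stops at the closing quote. A quick, self-contained demonstration on made-up include lines:

import re

regxitp = re.compile(r'(?<=#include ")(?!.*(\b\/\b|posre)).*(?=")')

# hypothetical lines from a .top file
for line in ['#include "ligand.itp"',
             '#include "toppar/forcefield.itp"',
             '#include "posre_protein.itp"']:
    match = regxitp.search(line)
    print(line, '->', match.group() if match else None)
# only "ligand.itp" is extracted; the path and the posre file are skipped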
from os.path import isfile, isdir
from urllib.request import urlretrieve
from tqdm import tqdm
import problem_unittests as tests
import tarfile

cifar10_dataset_folder_path = 'cifar-10-batches-py'

class DLProgress(tqdm):
    last_block = 0

    def hook(self, block_num=1, block_size=1, total_size=None):
        self.total = total_size
        self.update((block_num - self.last_block) * block_size)
        self.last_block = block_num

if not isfile('cifar-10-python.tar.gz'):
    with DLProgress(unit='B', unit_scale=True, miniters=1, desc='CIFAR-10 Dataset') as pbar:
        urlretrieve(
            'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz',
            'cifar-10-python.tar.gz',
            pbar.hook)

if not isdir(cifar10_dataset_folder_path):
    with tarfile.open('cifar-10-python.tar.gz') as tar:
        tar.extractall()
        tar.close()


tests.test_folder_path(cifar10_dataset_folder_path)

Example #50
0
from sys import path as sys_path
from os import path as os_path
from pkgutil import extend_path

__extended_path = "/home/callum/buggy_ws/src/robot_upstart-jade-devel/src".split(
    ";")
for p in reversed(__extended_path):
    sys_path.insert(0, p)
    del p
del sys_path

__path__ = extend_path(__path__, __name__)
del extend_path

__execfiles = []
for p in __extended_path:
    src_init_file = os_path.join(p, __name__ + '.py')
    if os_path.isfile(src_init_file):
        __execfiles.append(src_init_file)
    else:
        src_init_file = os_path.join(p, __name__, '__init__.py')
        if os_path.isfile(src_init_file):
            __execfiles.append(src_init_file)
    del src_init_file
    del p
del os_path
del __extended_path

for __execfile in __execfiles:
    with open(__execfile, 'r') as __fh:
        exec(__fh.read())
    del __fh
    del __execfile
def runTraining(opts):
    print('' * 41)
    print('~' * 50)
    print('~~~~~~~~~~~~~~~~~  PARAMETERS ~~~~~~~~~~~~~~~~')
    print('~' * 50)
    print('  - Number of image modalities: {}'.format(opts.numModal))
    print('  - Number of classes: {}'.format(opts.numClasses))
    print('  - Directory to load images: {}'.format(opts.root_dir))
    for i in range(len(opts.modality_dirs)):
        print('  - Modality {}: {}'.format(i + 1, opts.modality_dirs[i]))
    print('  - Directory to save results: {}'.format(opts.save_dir))
    print('  - The model will be saved as: {}'.format(opts.modelName))
    print('-' * 41)
    print('  - Number of epochs: {}'.format(opts.numEpochs))
    print('  - Batch size: {}'.format(opts.batchSize))
    print('  - Number of samples per epoch: {}'.format(opts.numSamplesEpoch))
    print('  - Learning rate: {}'.format(opts.l_rate))
    print('' * 41)

    print('-' * 41)
    print('~~~~~~~~  Starting the training... ~~~~~~')
    print('-' * 41)
    print('' * 40)
    a = 64
    b = 64
    samplesPerEpoch = opts.numSamplesEpoch
    batch_size = opts.batchSize

    lr = opts.l_rate
    epoch = opts.numEpochs

    root_dir = opts.root_dir
    model_name = opts.modelName

    if not (len(opts.modality_dirs) == opts.numModal): raise AssertionError

    moda_1 = root_dir + 'Training/' + opts.modality_dirs[0]
    moda_2 = root_dir + 'Training/' + opts.modality_dirs[1]

    if (opts.numModal == 3):
        moda_3 = root_dir + 'Training/' + opts.modality_dirs[2]

    moda_g = root_dir + 'Training/GT'

    print(' --- Getting image names.....')
    print(' - Training Set: -')
    if os.path.exists(moda_1):
        imageNames_tr = [
            f for f in os.listdir(moda_1) if isfile(join(moda_1, f))
        ]
        np.random.seed(1)
        np.random.shuffle(imageNames_tr)
        imageNames_val = imageNames_tr[0:40]
        imageNames_train = list(set(imageNames_tr) - set(imageNames_val))
        print(' ------- Images found ------')
        for i in range(len(imageNames_train)):
            print(' - {}'.format(imageNames_train[i]))
    else:
        raise Exception(' - {} does not exist'.format(moda_1))
    moda_1_val = root_dir + 'Training/' + opts.modality_dirs[0]
    moda_2_val = root_dir + 'Training/' + opts.modality_dirs[1]

    if (opts.numModal == 3):
        moda_3_val = root_dir + 'Training/' + opts.modality_dirs[2]
    moda_g_val = root_dir + 'Training/GT'

    print(' --------------------')
    print(' - Validation Set: -')
    if os.path.exists(moda_1):
        # imageNames_val = [f for f in os.listdir(moda_1_val) if isfile(join(moda_1_val, f))]
        # imageNames_val.sort()
        print(' ------- Images found ------')
        for i in range(len(imageNames_val)):
            print(' - {}'.format(imageNames_val[i]))
    else:
        raise Exception(' - {} does not exist'.format(moda_1_val))

    print("~~~~~~~~~~~ Creating the model ~~~~~~~~~~")
    num_classes = opts.numClasses
    # Define UNet_FEE_2mod
    # To-Do. Get as input the config settings to create different networks
    if (opts.numModal == 2):
        hdNet = UNet_FEE(in_dim=2, out_dim=1, num_filters=4)

    #
    '''try:
        hdNet = torch.load(os.path.join(model_name, "Best_" + model_name + ".pkl"))
        print("--------model restored--------")
    except:
        print("--------model not restored--------")
        pass'''

    #softMax = nn.Softmax()
    softMax = nn.Sigmoid()
    CE_loss = DicexCELoss()

    if torch.cuda.is_available():
        hdNet.cuda()
        softMax.cuda()
        CE_loss.cuda()

    # To-DO: Check that optimizer is the same (and same values) as the Theano implementation
    optimizer = torch.optim.Adam(hdNet.parameters(), lr=lr, betas=(0.9, 0.999))
    #optimizer = torch.optim.SGD(hdNet.parameters(), lr=lr, momentum = 0.6)
    print(" ~~~~~~~~~~~ Starting the training ~~~~~~~~~~")

    dscAll = []
    accall = []
    train_eval = []
    dsc_eval = []
    acc_eval = []

    d1 = 0

    if (opts.numModal == 2):
        imgPaths = [moda_1, moda_2]

    if (opts.numModal == 3):
        imgPaths = [moda_1, moda_2, moda_3]
    val_epochs = [0, 21, 41, 61, 81, 101, 121, 141]
    x_train, y_train, img_shape = load_data_trainG(imgPaths, moda_g,
                                                   imageNames_train,
                                                   samplesPerEpoch,
                                                   opts.numModal)
    print(x_train.shape)
    numBatches = int(x_train.shape[0] / batch_size)
    idx = np.arange(x_train.shape[0])
    for e_i in range(epoch):
        hdNet.train()
        lossEpoch = []
        np.random.shuffle(idx)
        x_train = x_train[idx]
        y_train = y_train[idx]
        for b_i in range(numBatches):
            optimizer.zero_grad()
            hdNet.zero_grad()
            MRIs = numpy_to_var(x_train[b_i * batch_size:b_i * batch_size +
                                        batch_size, :, :, :, :])
            Segmentation = numpy_to_var(
                y_train[b_i * batch_size:b_i * batch_size +
                        batch_size, :, :, :])

            segmentation_prediction = hdNet(MRIs)
            #print("segmentation_prediction : ", segmentation_prediction.shape)
            #predClass_y = softMax(segmentation_prediction)
            segmentation_prediction = softMax(segmentation_prediction)
            # To adapt CE to 3D
            # LOGITS:
            segmentation_prediction = segmentation_prediction.permute(
                0, 2, 3, 4, 1).contiguous()
            segmentation_prediction = segmentation_prediction.reshape(
                segmentation_prediction.numel() // num_classes, num_classes)
            if e_i == 0 and b_i == 0:
                print("MRIS : ", MRIs.shape)
                print("Segmentation : ", Segmentation.shape)
                print("segmentation_prediction : ",
                      segmentation_prediction.shape)
            CE_loss_batch = CE_loss(
                segmentation_prediction,
                Segmentation.reshape(-1).type(torch.cuda.FloatTensor))
            loss = CE_loss_batch
            loss.backward()

            optimizer.step()
            lossEpoch.append(CE_loss_batch.cpu().data.numpy())

            printProgressBar(b_i + 1,
                             numBatches,
                             prefix="[Training] Epoch: {} ".format(e_i),
                             length=15)

            del MRIs
            del Segmentation
            del segmentation_prediction
            # del predClass_y

        if not os.path.exists(model_name):
            os.makedirs(model_name)

        np.save(os.path.join(model_name, model_name + '_loss.npy'), dscAll)

        print(' Epoch: {}, loss: {}'.format(e_i, np.mean(lossEpoch)))

        if (e_i % 5) == 0:

            if (opts.numModal == 2):
                moda_n = [moda_1_val, moda_2_val]
            if (opts.numModal == 3):
                moda_n = [moda_1_val, moda_2_val, moda_3_val]

            dsct, acct = inferencee(hdNet, x_train, y_train, imageNames_train,
                                    e_i, opts.save_dir, opts.numModal)
            dsc_eval.append(dsct)
            acc_eval.append(acct)
            print(' Metrics: The mean of train Accuracy is : {} '.format(acct))
            print(' Metrics: The mean of train DSC is : {} '.format(dsct))
            np.save(os.path.join(model_name, model_name + 'train_DSCs.npy'),
                    dsc_eval)
            np.save(os.path.join(model_name, model_name + 'train_ACC.npy'),
                    acc_eval)

            dsc, acc = inference(hdNet, moda_n, moda_g_val, imageNames_val,
                                 e_i, opts.save_dir, opts.numModal)

            dscAll.append(dsc)
            accall.append(acc)

            print(' Metrics: The mean of Accuracy is : {} '.format(
                np.mean(acc)))
            print(' Metrics: The mean of DSC is : {} '.format(np.mean(dsc)))
            if not os.path.exists(model_name):
                os.makedirs(model_name)

            np.save(os.path.join(model_name, model_name + '_DSCs.npy'), dscAll)
            np.save(os.path.join(model_name, model_name + '_ACC.npy'), accall)

            if np.mean(dsc) > 0.60:
                if not os.path.exists(model_name):
                    os.makedirs(model_name)
                torch.save(
                    hdNet,
                    os.path.join(model_name,
                                 "Best2_" + model_name + str(e_i) + ".pkl"))
    """
Example #52
0
def run_evaluation(training_param, test_param, destdir):
    '''Runs the model evaluation
    '''
    if not isdir(destdir):
        raise NotADirectoryError('"%s"' % destdir)
    print('Running Evaluator. All files will be stored in:\n%s' %
          destdir)

    print('Step 1 of 2: Training (creating models)')
    newly_created_models = 0
    print('Reading Training file (HDF)')
    # returns iterable of classifier TO BE CREATED (already existing are not
    # yielded):
    iterargs = list(training_param.iterargs(destdir))
    if iterargs:
        pool = Pool(processes=int(cpu_count()))
        with click.progressbar(length=len(iterargs),
                               fill_char='o', empty_char='.') as pbar:
            try:
                print('Building classifiers from parameters and training file')
                for clf, destpath in \
                        pool.imap_unordered(_classifier_mp, iterargs):
                    # save on the main process (here):
                    if clf is not None:
                        save_clf(clf, destpath)
                        newly_created_models += 1
                    pbar.update(1)
                # always call these methods, even though it is unclear
                # whether they are strictly needed after imap / imap_unordered
                pool.close()
                pool.join()
            except Exception as exc:
                _kill_pool(pool, str(exc))
                raise exc

    classifier_paths = [
        _ for _ in training_param.classifier_paths(destdir) if isfile(_)
    ]
    print("%d of %d models created (already existing were not overwritten)" %
          (newly_created_models, len(classifier_paths)))

    print('Step 2 of 2: Testing (creating prediction data frames)')
    pred_filepaths = []
    test_param.set_classifiers_paths(classifier_paths)
    # with set_classifiers_paths above, we internally stored the classifiers
    # that do NOT yet have a prediction dataframe. `num_iterations` below
    # accounts for this and can thus be zero for several reasons, among which
    # when all classifiers already have a corresponding prediction dataframe:
    num_iterations = test_param.num_iterations
    if num_iterations:
        pool = Pool(processes=int(cpu_count()))
        with click.progressbar(length=num_iterations,
                               fill_char='o', empty_char='.') as pbar:
            try:
                for test_df_chunk in test_param.read_testset():
                    iterargs = test_param.iterargs(test_df_chunk)
                    for pred_df, destpath in \
                            pool.imap_unordered(_predict_mp, iterargs):
                        if pred_df is not None:
                            # save on the main process (here):
                            kwargs = {'append': True}
                            if test_param.min_itemsize:
                                kwargs['min_itemsize'] = test_param.min_itemsize
                            save_df(pred_df, destpath, **kwargs)
                            if destpath not in pred_filepaths:
                                pred_filepaths.append(destpath)
                        pbar.update(1)
                # always call these methods, even though it is unclear
                # whether they are strictly needed after imap / imap_unordered
                pool.close()
                pool.join()
            except Exception as exc:
                _kill_pool(pool, str(exc))
                raise exc
    print("%d of %d prediction HDF file(s) created "
          "(already existing were not overwritten)" %
          (len(pred_filepaths), len(classifier_paths)))

    print()
    print('Creating evaluation metrics')
    create_summary_evaluationmetrics(destdir)
    print('DONE')
Example #53
0
#!/usr/bin/python

import os
from os import listdir
from os.path import isfile, join
from subprocess import call

fileList = [ f for f in listdir(".") if isfile(join(".",f)) and f.endswith('.zip') ]

for f in fileList:
    # str.strip removes a set of characters, not a suffix; use splitext instead
    name = os.path.splitext(f)[0]
    os.mkdir(name)
    os.chdir(name)
    call(["unzip","../"+f])
    os.chdir('..')

os.rename('twinnings','twinings')
Example #54
0
 def initialize_options(self):
     tf_gcc = check_tf_cuda_compatibility()
     self.gcc = tf_gcc if isfile(tf_gcc) else 'gcc'
     self.nvcc = '/usr/local/cuda/bin/nvcc' if isfile('/usr/local/cuda/bin/nvcc') else 'nvcc'
     self.cuda_lib = '/usr/local/cuda/lib64/'
Example #55
0
    def read_doc(self, docname, app=None):
        """Parse a file and add/update inventory entries for the doctree."""

        self.temp_data['docname'] = docname
        # defaults to the global default, but can be re-set in a document
        self.temp_data['default_domain'] = \
            self.domains.get(self.config.primary_domain)

        self.settings['input_encoding'] = self.config.source_encoding
        self.settings['trim_footnote_reference_space'] = \
            self.config.trim_footnote_reference_space
        self.settings['gettext_compact'] = self.config.gettext_compact

        docutilsconf = path.join(self.srcdir, 'docutils.conf')
        # read docutils.conf from source dir, not from current dir
        OptionParser.standard_config_files[1] = docutilsconf
        if path.isfile(docutilsconf):
            self.note_dependency(docutilsconf)

        with sphinx_domains(self):
            if self.config.default_role:
                role_fn, messages = roles.role(self.config.default_role, english,
                                               0, dummy_reporter)
                if role_fn:
                    roles._roles[''] = role_fn
                else:
                    self.warn(docname, 'default role %s not found' %
                              self.config.default_role)

            codecs.register_error('sphinx', self.warn_and_replace)

            # publish manually
            reader = SphinxStandaloneReader(self.app, parsers=self.config.source_parsers)
            pub = Publisher(reader=reader,
                            writer=SphinxDummyWriter(),
                            destination_class=NullOutput)
            pub.set_components(None, 'restructuredtext', None)
            pub.process_programmatic_settings(None, self.settings, None)
            src_path = self.doc2path(docname)
            source = SphinxFileInput(app, self, source=None, source_path=src_path,
                                     encoding=self.config.source_encoding)
            pub.source = source
            pub.settings._source = src_path
            pub.set_destination(None, None)
            pub.publish()
            doctree = pub.document

        # post-processing
        self.process_dependencies(docname, doctree)
        self.process_images(docname, doctree)
        self.process_downloads(docname, doctree)
        self.process_metadata(docname, doctree)
        self.create_title_from(docname, doctree)
        for manager in itervalues(self.managers):
            manager.process_doc(docname, doctree)
        for domain in itervalues(self.domains):
            domain.process_doc(self, docname, doctree)

        # allow extension-specific post-processing
        if app:
            app.emit('doctree-read', doctree)

        # store time of reading, for outdated files detection
        # (Some filesystems have coarse timestamp resolution;
        # therefore time.time() can be older than filesystem's timestamp.
        # For example, FAT32 has 2sec timestamp resolution.)
        self.all_docs[docname] = max(
            time.time(), path.getmtime(self.doc2path(docname)))

        if self.versioning_condition:
            old_doctree = None
            if self.versioning_compare:
                # get old doctree
                try:
                    with open(self.doc2path(docname,
                                            self.doctreedir, '.doctree'), 'rb') as f:
                        old_doctree = pickle.load(f)
                except EnvironmentError:
                    pass

            # add uids for versioning
            if not self.versioning_compare or old_doctree is None:
                list(add_uids(doctree, self.versioning_condition))
            else:
                list(merge_doctrees(
                    old_doctree, doctree, self.versioning_condition))

        # make it picklable
        doctree.reporter = None
        doctree.transformer = None
        doctree.settings.warning_stream = None
        doctree.settings.env = None
        doctree.settings.record_dependencies = None

        # cleanup
        self.temp_data.clear()
        self.ref_context.clear()
        roles._roles.pop('', None)  # if a document has set a local default role

        # save the parsed doctree
        doctree_filename = self.doc2path(docname, self.doctreedir,
                                         '.doctree')
        ensuredir(path.dirname(doctree_filename))
        with open(doctree_filename, 'wb') as f:
            pickle.dump(doctree, f, pickle.HIGHEST_PROTOCOL)
Example #56
0
    def test_CreatePythonEntryPointAction_noarch_python(self):
        target_python_version = '%d.%d' % sys.version_info[:2]
        transaction_context = {
            'target_python_version': target_python_version,
        }
        package_info = AttrDict(package_metadata=AttrDict(noarch=AttrDict(
            type=NoarchType.python,
            entry_points=(
                'command1=some.module:main',
                'command2=another.somewhere:go',
            ),
        )))

        axns = CreatePythonEntryPointAction.create_actions(transaction_context, package_info,
                                                           self.prefix, LinkType.hardlink)
        grouped_axns = groupby(lambda ax: isinstance(ax, LinkPathAction), axns)
        windows_exe_axns = grouped_axns.get(True, ())
        assert len(windows_exe_axns) == (2 if on_win else 0)
        py_ep_axns = grouped_axns.get(False, ())
        assert len(py_ep_axns) == 2

        py_ep_axn = py_ep_axns[0]

        command, module, func = parse_entry_point_def('command1=some.module:main')
        assert command == 'command1'
        if on_win:
            target_short_path = "%s\\%s-script.py" % (get_bin_directory_short_path(), command)
        else:
            target_short_path = "%s/%s" % (get_bin_directory_short_path(), command)
        assert py_ep_axn.target_full_path == join(self.prefix, target_short_path)
        assert py_ep_axn.module == module == 'some.module'
        assert py_ep_axn.func == func == 'main'

        mkdir_p(dirname(py_ep_axn.target_full_path))
        py_ep_axn.execute()
        assert isfile(py_ep_axn.target_full_path)
        if not on_win:
            assert is_executable(py_ep_axn.target_full_path)
        with open(py_ep_axn.target_full_path) as fh:
            lines = fh.readlines()
            first_line = lines[0].strip()
            last_line = lines[-1].strip()
        if not on_win:
            python_full_path = join(self.prefix, get_python_short_path(target_python_version))
            assert first_line == "#!%s" % python_full_path
        assert last_line == "exit(%s())" % func

        py_ep_axn.reverse()
        assert not isfile(py_ep_axn.target_full_path)

        if on_win:
            windows_exe_axn = windows_exe_axns[0]
            target_short_path = "%s\\%s.exe" % (get_bin_directory_short_path(), command)
            assert windows_exe_axn.target_full_path == join(self.prefix, target_short_path)

            mkdir_p(dirname(windows_exe_axn.target_full_path))
            windows_exe_axn.verify()
            windows_exe_axn.execute()
            assert isfile(windows_exe_axn.target_full_path)
            assert is_executable(windows_exe_axn.target_full_path)

            src = compute_md5sum(join(context.conda_prefix, 'Scripts/conda.exe'))
            assert src == compute_md5sum(windows_exe_axn.target_full_path)

            windows_exe_axn.reverse()
            assert not isfile(windows_exe_axn.target_full_path)
Example #57
0
async def google_drive(gdrive):
    """ - Parse all google drive functions - """
    reply = ''
    value = gdrive.pattern_match.group(1)
    file_path = None
    uri = None
    if not value and not gdrive.reply_to_msg_id:
        return
    elif value and gdrive.reply_to_msg_id:
        return await gdrive.edit(
            "`[UNKNOWN - ERROR]`\n\n"
            "`Status` : **failed**\n"
            "`Reason` : Confused to upload file or the replied message/media."
        )
    service = await create_app(gdrive)
    if service is False:
        return
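    # a local file path: upload it directly, or queue a .torrent as a download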
    if isfile(value):
        file_path = value
        if file_path.endswith(".torrent"):
            uri = [file_path]
            file_path = None
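    # a local directory: mirror the whole folder to google drive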
    elif isdir(value):
        folder_path = value
        global parent_Id
        folder_name = await get_raw_name(folder_path)
        folder = await create_dir(service, folder_name)
        parent_Id = folder.get('id')
        webViewURL = "https://drive.google.com/drive/folders/" + parent_Id
        try:
            await task_directory(gdrive, service, folder_path)
        except CancelProcess:
            await gdrive.respond(
                "`[FOLDER - CANCELLED]`\n\n"
                "`Status` : **OK** - received signal cancelled."
            )
            await reset_parentId()
            return await gdrive.delete()
        except Exception as e:
            await gdrive.edit(
                "`[FOLDER - UPLOAD]`\n\n"
                f"`{folder_name}`\n"
                "`Status` : **BAD**\n"
                f"`Reason` : {str(e)}"
            )
            return await reset_parentId()
        else:
            await gdrive.edit(
                "`[FOLDER - UPLOAD]`\n\n"
                f"[{folder_name}]({webViewURL})\n"
                "`Status` : **OK** - Successfully uploaded.\n\n"
            )
            return await reset_parentId()
    elif not value and gdrive.reply_to_msg_id:
        reply += await download(gdrive, service)
        await gdrive.respond(reply)
        return await gdrive.delete()
    else:
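        # not a local path: try google drive links first, then plain
        # URLs/magnet links, then bare drive file IDs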
        if re.findall(r'\bhttps?://drive\.google\.com\S+', value):
            """ - Link is google drive fallback to download - """
            value = re.findall(r'\bhttps?://drive\.google\.com\S+', value)
            for uri in value:
                try:
                    reply += await download_gdrive(gdrive, service, uri)
                except CancelProcess:
                    reply += (
                        "`[FILE - CANCELLED]`\n\n"
                        "`Status` : **OK** - received signal cancelled."
                    )
                    break
                except Exception as e:
                    reply += (
                        "`[FILE - ERROR]`\n\n"
                        "`Status` : **BAD**\n"
                        f"`Reason` : {str(e)}\n\n"
                    )
                    continue
            if reply:
                await gdrive.respond(reply, link_preview=False)
                return await gdrive.delete()
            else:
                return
        elif re.findall(r'\bhttps?://.*\.\S+', value) or "magnet:?" in value:
            uri = value.split()
        else:
            for fileId in value.split():
                # heuristic: a drive file ID usually contains digits and/or
                # the characters "-" / "_"
                one = any(map(str.isdigit, fileId))
                two = "-" in fileId or "_" in fileId
                if one or two:
                    try:
                        reply += await download_gdrive(gdrive, service, fileId)
                    except CancelProcess:
                        reply += (
                            "`[FILE - CANCELLED]`\n\n"
                            "`Status` : **OK** - received signal cancelled."
                        )
                        break
                    except Exception as e:
                        reply += (
                            "`[FILE - ERROR]`\n\n"
                            "`Status` : **BAD**\n"
                            f"`Reason` : {str(e)}\n\n"
                        )
                        continue
            if reply:
                await gdrive.respond(reply, link_preview=False)
                return await gdrive.delete()
            else:
                return
        if not uri and not gdrive.reply_to_msg_id:
            return await gdrive.edit(
                "`[VALUE - ERROR]`\n\n"
                "`Status` : **BAD**\n"
                "`Reason` : given value is not URL nor file/folder path. "
                "If you think this is wrong, maybe you use .gd with multiple "
                "value of files/folders, e.g `.gd <filename1> <filename2>` "
                "for upload from files/folders path this doesn't support it."
            )
    if uri and not gdrive.reply_to_msg_id:
        for dl in uri:
            try:
                reply += await download(gdrive, service, dl)
            except Exception as e:
                if " not found" in str(e) or "'file'" in str(e):
                    reply += (
                        "`[FILE - CANCELLED]`\n\n"
                        "`Status` : **OK** - received signal cancelled."
                    )
                    await asyncio.sleep(2.5)
                    break
                else:
                    """ - if something bad happened, continue to next uri - """
                    reply += (
                        "`[UNKNOWN - ERROR]`\n\n"
                        "`Status` : **BAD**\n"
                        f"`Reason` : `{dl}` | `{str(e)}`\n\n"
                    )
                    continue
        await gdrive.respond(reply, link_preview=False)
        return await gdrive.delete()
    mimeType = await get_mimeType(file_path)
    file_name = await get_raw_name(file_path)
    result = None
    try:
        result = await upload(gdrive, service,
                              file_path, file_name, mimeType)
    except CancelProcess:
        await gdrive.respond(
            "`[FILE - CANCELLED]`\n\n"
            "`Status` : **OK** - received signal cancelled."
        )
    if result:
        await gdrive.respond(
            "`[FILE - UPLOAD]`\n\n"
            f"`Name   :` `{file_name}`\n"
            f"`Size   :` `{humanbytes(result[0])}`\n"
            f"`Link   :` [{file_name}]({result[1]})\n"
            "`Status :` **OK** - Successfully uploaded.\n",
            link_preview=False
            )
    await gdrive.delete()
    return
Example #58
0
    def _run_interface(self, runtime):

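        # Compute (or reuse) a noise covariance matrix: from epochs around
        # events when the input is epoched/evoked, otherwise from the raw /
        # empty-room recording matched by cov_fname_in.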
        raw_filename = self.inputs.raw_filename
        cov_fname_in = self.inputs.cov_fname_in
        is_epoched = self.inputs.is_epoched
        is_evoked = self.inputs.is_evoked
        events_id = self.inputs.events_id
        t_min = self.inputs.t_min
        t_max = self.inputs.t_max

        data_path, basename, ext = split_f(raw_filename)

        self.cov_fname_out = op.join(data_path, '%s-cov.fif' % basename)

        if not op.isfile(cov_fname_in):
            if is_epoched and is_evoked:
                raw = read_raw_fif(raw_filename)
                events = find_events(raw)

                if not op.isfile(self.cov_fname_out):
                    print(('\n*** COMPUTE COV FROM EPOCHS ***\n' +
                           self.cov_fname_out))

                    reject = create_reject_dict(raw.info)
                    picks = pick_types(raw.info,
                                       meg=True,
                                       ref_meg=False,
                                       exclude='bads')

                    epochs = Epochs(raw,
                                    events,
                                    events_id,
                                    t_min,
                                    t_max,
                                    picks=picks,
                                    baseline=(None, 0),
                                    reject=reject)

                    # TODO method='auto'? too long!!!
                    noise_cov = compute_covariance(epochs,
                                                   tmax=0,
                                                   method='diagonal_fixed')
                    write_cov(self.cov_fname_out, noise_cov)
                else:
                    print(('\n *** NOISE cov file %s exists!!! \n' %
                           self.cov_fname_out))
            else:
                print('\n *** RAW DATA \n')
                for er_fname in glob.glob(op.join(data_path, cov_fname_in)):
                    print(('\n found file name %s  \n' % er_fname))

                try:
                    if er_fname.rfind('cov.fif') > -1:
                        print(('\n *** NOISE cov file %s exists!! \n' %
                               er_fname))
                        self.cov_fname_out = er_fname
                    else:
                        if er_fname.rfind('.fif') > -1:
                            er_raw = read_raw_fif(er_fname)
                            er_fname = er_fname.replace('.fif', '-raw-cov.fif')
                        elif er_fname.rfind('.ds') > -1:
                            er_raw = read_raw_ctf(er_fname)
                            er_fname = er_fname.replace('.ds', '-raw-cov.fif')

                        self.cov_fname_out = op.join(data_path, er_fname)

                        if not op.isfile(self.cov_fname_out):
                            reject = create_reject_dict(er_raw.info)
                            picks = pick_types(er_raw.info,
                                               meg=True,
                                               ref_meg=False,
                                               exclude='bads')

                            noise_cov = compute_raw_covariance(er_raw,
                                                               picks=picks,
                                                               reject=reject)
                            write_cov(self.cov_fname_out, noise_cov)
                        else:
                            print(('\n *** NOISE cov file %s exists!!! \n' %
                                   self.cov_fname_out))
                except NameError:
                    sys.exit("No covariance matrix as input!")
                    # TODO: create a diagonal matrix instead?

        else:
            print(('\n *** NOISE cov file %s exists!!! \n' % cov_fname_in))
            self.cov_fname_out = cov_fname_in

        return runtime
Example #59
0
async def download(gdrive, service, uri=None):
    """ - Download files to local storage then upload - """
    reply = ''
    required_file_name = None
    if not isdir(TEMP_DOWNLOAD_DIRECTORY):
        os.makedirs(TEMP_DOWNLOAD_DIRECTORY)
    if uri:
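        # a URI was given: hand the download off to aria2 (torrent file,
        # magnet link or plain URL)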
        full_path = os.getcwd() + TEMP_DOWNLOAD_DIRECTORY.strip('.')
        if isfile(uri) and uri.endswith(".torrent"):
            downloads = aria2.add_torrent(
                uri,
                uris=None,
                options={'dir': full_path},
                position=None)
        else:
            uri = [uri]
            downloads = aria2.add_uris(
                uri,
                options={'dir': full_path},
                position=None)
        gid = downloads.gid
        await check_progress_for_dl(gdrive, gid, previous=None)
        file = aria2.get_download(gid)
        filename = file.name
        if file.followed_by_ids:
            # the first gid only fetched metadata (e.g. a magnet link);
            # follow the real download and take its file name
            new_gid = await check_metadata(gid)
            await check_progress_for_dl(gdrive, new_gid, previous=None)
            filename = aria2.get_download(new_gid).name
        required_file_name = TEMP_DOWNLOAD_DIRECTORY + filename
    else:
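        # no URI: download the media from the replied-to Telegram message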
        try:
            current_time = time.time()
            downloaded_file_name = await gdrive.client.download_media(
                await gdrive.get_reply_message(),
                TEMP_DOWNLOAD_DIRECTORY,
                progress_callback=lambda d, t: asyncio.get_event_loop(
                ).create_task(progress(d, t, gdrive, current_time,
                                       "[FILE - DOWNLOAD]")))
        except Exception as e:
            await gdrive.edit(str(e))
        else:
            required_file_name = downloaded_file_name
    try:
        file_name = await get_raw_name(required_file_name)
    except AttributeError:
        reply += (
            "`[ENTRY - ERROR]`\n\n"
            "`Status` : **BAD**\n"
        )
        return reply
    mimeType = await get_mimeType(required_file_name)
    try:
        status = "[FILE - UPLOAD]"
        if isfile(required_file_name):
            try:
                result = await upload(
                    gdrive, service, required_file_name, file_name, mimeType)
            except CancelProcess:
                reply += (
                    "`[FILE - CANCELLED]`\n\n"
                    "`Status` : **OK** - received signal cancelled."
                )
                return reply
            else:
                reply += (
                    f"`{status}`\n\n"
                    f"`Name   :` `{file_name}`\n"
                    f"`Size   :` `{humanbytes(result[0])}`\n"
                    f"`Link   :` [{file_name}]({result[1]})\n"
                    "`Status :` **OK** - Successfully uploaded.\n\n"
                )
                return reply
        else:
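            # the download produced a directory: create a drive folder and
            # upload its contents recursively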
            status = status.replace("[FILE", "[FOLDER")
            global parent_Id
            folder = await create_dir(service, file_name)
            parent_Id = folder.get('id')
            webViewURL = (
                "https://drive.google.com/drive/folders/"
                + parent_Id
            )
            try:
                await task_directory(gdrive, service, required_file_name)
            except CancelProcess:
                reply += (
                    "`[FOLDER - CANCELLED]`\n\n"
                    "`Status` : **OK** - received signal cancelled."
                )
                await reset_parentId()
                return reply
            except Exception:
                await reset_parentId()
            else:
                reply += (
                    f"`{status}`\n\n"
                    f"[{file_name}]({webViewURL})\n"
                    "`Status` : **OK** - Successfully uploaded.\n\n"
                )
                await reset_parentId()
                return reply
    except Exception as e:
        status = status.replace("DOWNLOAD]", "ERROR]")
        reply += (
            f"`{status}`\n\n"
            "`Status` : **failed**\n"
            f"`Reason` : `{str(e)}`\n\n"
        )
        return reply
    return
Example #60
0
import json
import sys
from os import listdir
from os.path import isfile, join

from lib.router import Router

router = Router()
print(">> Walk, Don't Run")
# --------------------------------------------------------------------------
#STEP: modify version?

# `config` is assumed to have been loaded earlier in the full script
configVersion = config['version']
if config['options']['increment-version']:
    config['version'] = round(float(configVersion) + 0.1, 1)

#STEP: load
sourcePath = router.getRoute(
    config['source']['route']) + config['source']['dir']
sourceFilesNames = [
    f for f in listdir(sourcePath) if isfile(join(sourcePath, f))
]
sourceFilesNames.remove('links.json')
'''
Create data frame with: participantName, date, clicks, in, time, dayWeek
- replace characters
-> list by participant with: {in, clicks, date, time}, sort by date
'''

# STEP: create registry of participants
fileIdx = 0
with open(sourcePath + sourceFilesNames[fileIdx], 'r') as rawObj:
    fileJsonContent = json.load(rawObj)

sys.exit(0)