Example #1
    def run_upgrade(self):
        nid = ""
        aptHasErrors = self.apt.aptHasErrors()
        if aptHasErrors is not None:
            MessageDialog(self.aptErrorText, aptHasErrors)
        elif self.upgradables:

            if self.apt.upgradablePackages:
                self.log.write("=================== upgradable pacages ====================", "UM.run_upgrade", "debug")
                self.log.write(self.createLogString(self.apt.upgradablePackages), "UM.run_upgrade", "debug")
            if self.apt.removedPackages:
                self.log.write("==================== removed packages =====================", "UM.run_upgrade", "debug")
                self.log.write(self.createLogString(self.apt.removedPackages), "UM.run_upgrade", "debug")
            if self.apt.newPackages:
                self.log.write("======================= new packages =======================", "UM.run_upgrade", "debug")
                self.log.write(self.createLogString(self.apt.newPackages), "UM.run_upgrade", "debug")
            if self.apt.heldbackPackages:
                self.log.write("=================== kept back packages =====================", "UM.run_upgrade", "debug")
                self.log.write(self.createLogString(self.apt.heldbackPackages), "UM.run_upgrade", "debug")

            if not self.quickUpdate:
                self.showOutput()
            contMsg = _("Continue installation?")
            if self.upgradableUM:
                cmd = "%s install updatemanager" % self.umglobal.settings['apt-get-string']
                cmd += "; %s install %s" % (self.umglobal.settings['apt-get-string'], " ".join(self.apt.getPackageDependencies('updatemanager')))
                nid = 'uminstallum'
                self.prepForCommand(nid)
                if self.quickUpdate:
                    self.ec.run(cmd)
                else:
                    self.terminal.executeCommand(cmd, nid)
                self.log.write("Execute command: %s (%s)" % (cmd, nid), "UM.on_btnInstall_clicked", "debug")
            else:
                msg = self.getDistUpgradeInfo()
                answer = True
                if msg != "":
                    answer = self.showConfirmationDlg(contMsg, msg)
                if answer:
                    cmd = "%s dist-upgrade" % self.umglobal.settings['apt-get-string']
                    #if self.umglobal.newUpd:
                    pre = join(self.umglobal.filesDir, self.umglobal.settings['pre-upd'].replace("[VERSION]", self.umglobal.serverUpdVersion))
                    post = join(self.umglobal.filesDir, self.umglobal.settings['post-upd'].replace("[VERSION]", self.umglobal.serverUpdVersion))
                    if exists(pre):
                        cmd = "/bin/bash %(pre)s; %(cmd)s" % { "pre": pre, "cmd": cmd }
                    if exists(post):
                        cmd = "%(cmd)s; /bin/bash %(post)s" % { "cmd": cmd, "post": post }
                    nid = 'umupd'
                    self.prepForCommand(nid)
                    if self.quickUpdate:
                        self.ec.run(cmd)
                    else:
                        self.terminal.executeCommand(cmd, nid)
                    self.log.write("Execute command: %s (%s)" % (cmd, nid), "UM.on_btnInstall_clicked", "debug")

        else:
            if not self.quickUpdate:
                MessageDialog(self.btnInstall.get_label(), self.uptodateText)

        return nid
Example #2
def results_table(path_dict):
    """ Return precalculated results images for subject info in `path_dict`

    Parameters
    ----------
    path_dict : dict
        containing key 'rootdir'

    Returns
    -------
    rtab : dict
        dict with keys given by run directories for this subject, values being a
        list with filenames of effect and sd images.
    """
    # Which runs correspond to this design type?
    rootdir = path_dict['rootdir']
    runs = filter(lambda f: isdir(pjoin(rootdir, f)),
                  ['results_%02d' % i for i in range(1,5)] )

    # Find out which contrasts have t-statistics,
    # storing the filenames for reading below

    results = {}

    for rundir in runs:
        rundir = pjoin(rootdir, rundir)
        for condir in listdir(rundir):
            # both an effect and an sd image must exist for this contrast
            fname_effect = abspath(pjoin(rundir, condir, 'effect.nii'))
            fname_sd = abspath(pjoin(rundir, condir, 'sd.nii'))
            if exists(fname_effect) and exists(fname_sd):
                results.setdefault(condir, []).append([fname_effect,
                                                       fname_sd])
    return results
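A minimal usage sketch, assuming results_table above is in scope; the subject root is a hypothetical path whose results_01 .. results_04 subdirectories each hold <contrast>/effect.nii and <contrast>/sd.nii:

rtab = results_table({'rootdir': '/data/fiac/subject01'})  # hypothetical layout
for contrast, images in rtab.items():
    for fname_effect, fname_sd in images:
        print(contrast, fname_effect, fname_sd)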
Example #3
def valid_dir(d):
    # type: (Dict) -> bool
    dir = d['path']
    if not path.exists(dir):
        return True
    if not path.isdir(dir):
        return False

    if set(['Makefile', 'make.bat']) & set(os.listdir(dir)):  # type: ignore
        return False

    if d['sep']:
        dir = os.path.join('source', dir)
        if not path.exists(dir):
            return True
        if not path.isdir(dir):
            return False

    reserved_names = [
        'conf.py',
        d['dot'] + 'static',
        d['dot'] + 'templates',
        d['master'] + d['suffix'],
    ]
    if set(reserved_names) & set(os.listdir(dir)):  # type: ignore
        return False

    return True
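A short usage sketch; the dict keys mirror the ones valid_dir reads ('path', 'sep', 'dot', 'master', 'suffix'), and the values are illustrative assumptions:

d = {'path': '/tmp/newdocs',  # hypothetical target directory
     'sep': False,            # no separate source/ tree
     'dot': '_',              # prefix for _static / _templates
     'master': 'index',
     'suffix': '.rst'}
if valid_dir(d):
    print('safe to generate into', d['path'])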
Example #4
def setup_worker(fp, server_addr, port, counter=0, verbose=False,
                             error_profile=None):
    """ This routine starts the worker.

    fp: fp that should be used to store intermediate data

    server_addr: ip address of server
    
    port: port on server to connect to
    
    counter: counts each round of denoising

    verbose: verbose flag

    error_profile: path to error profile .dat file
    """
    if fp is None:
        raise ValueError, "setup_worker needs file path for worker"
    log_fh=None
    if verbose:
        dir = dirname(fp+".log")
        if not exists(dir):
            makedirs(dir)
        log_fh = open(fp+".log","a",0)

    #use local tmp if possible
    new_fp = fp
    if exists("/tmp"):
        new_fp = "/tmp/" + split(fp)[1]    
    
    #set up the workers and start the loop
    worker = DenoiseWorker(new_fp, server_addr, port, counter=counter,
                            log_fh=log_fh, error_profile=error_profile)
    #this asyncore loop will run until the server closes the connection
    loop()
Example #5
    def getdeps(self):
        """Download and Extract Sources"""
        for source in self.sources:
            self.log.info("")
            self.log.info("#####################################################")

            # Skip anything already extracted
            extractdir = abspath(join(DepSource.RootExtractDir, source.destsubdir))
            if exists(extractdir):
                self.log.warn("Deps Subdir: " + source.destsubdir + " already exists, skipping")
                continue

            extracted = False
            downloaded = source.download()
            if not downloaded:
                self.log.error("Download Failed")
            else:
                extracted = source.extract()

            # Remove the archive file
            if source.destsubdir != "atmel-asf":
                source.remove_archivefile()

        # Re-jig the directories for those that need it
        for source in self.sources:
            source.movetoparent_multiple()

        # Check for ASF Sources
        if not exists(join(self.DepsDirectory, "atmel-asf")):
            self.log.warn("There was no Atmel ASF Archive file found")
            self.log.warn("ASF is not required, but you can manually download the file below for the Atmel source")
            self.log.warn("http://www.atmel.com/tools/avrsoftwareframework.aspx?tab=overview")
            self.log.warn("So far this is only used for porting mbed to SAM-based MCUs")
        return
Example #6
def calc_ppmm(fnamep='', styr=0, enyr=0, model=''):
    if not path.exists(RootDir + "/" + model + "/junk"):
        system("mkdir -p " + RootDir + "/" + model + "/junk")
    fnp_nodir = fnamep.split("/")[-1]
    nyrs = enyr - styr + 1
    for i in range(nyrs):
        y = styr + i
        fnp = OUTTEMP + "/" + model + "/junk/" + fnp_nodir + str(y) + ".nc"
        fnpr = RootDir + "/" + model + "/junk/" + fnp_nodir + str(y) + ".nc"
        fnpr = fnpr.replace('pr', 'prmm')
        if not path.exists(fnp):
            if y == enyr:
                print 'infile not found: ', fnp, ' ...skipping last year'
                break
            else:
                raise Exception('infile not found: ' + fnp)
        # calc pp in mm if it does not exist yet
        if not path.exists(fnpr):
            print "\n... calculating daily pp in mm for %s" % (path.basename(fnp))
            txt1 = "cdo -m 1e+20 -mulc,86400 %s tmpfile" % fnp
            print txt1
            system(txt1)
            txt2 = "ncatted -a units,pr,o,c,'mm/day' tmpfile"
            print txt2
            system(txt2)
            txt3 = "mv tmpfile " + fnpr
            print txt3
            system(txt3)
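A one-line sanity check on the -mulc,86400 step above: model precipitation is stored as a flux in kg m-2 s-1, and 1 kg of water spread over 1 m^2 is a 1 mm layer, so multiplying by 86400 (seconds per day) yields mm/day, matching the units attribute written by ncatted.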
Example #7
def _download_reference_files(conn):
    print('Downloading reference files')
    if not exists(reference_base_dir):
        mkdir(reference_base_dir)

    files = {'tree': (get_reference_fp('gg_13_8-97_otus.tree'),
                      'ftp://ftp.microbio.me/greengenes_release/'
                      'gg_13_8_otus/trees/97_otus.tree'),
             'taxonomy': (get_reference_fp('gg_13_8-97_otu_taxonomy.txt'),
                          'ftp://ftp.microbio.me/greengenes_release/'
                          'gg_13_8_otus/taxonomy/97_otu_taxonomy.txt'),
             'sequence': (get_reference_fp('gg_13_8-97_otus.fasta'),
                          'ftp://ftp.microbio.me/greengenes_release/'
                          'gg_13_8_otus/rep_set/97_otus.fasta')}

    for file_type, (local_fp, url) in viewitems(files):
        # Do not download the file if it exists already
        if exists(local_fp):
            print("SKIPPING %s: file already exists at %s. To "
                  "download the file again, erase the existing file first" %
                  (file_type, local_fp))
        else:
            try:
                urlretrieve(url, local_fp)
            except Exception:
                raise IOError("Error: Could not fetch %s file from %s" %
                              (file_type, url))

    ref = Reference.create('Greengenes', '13_8', files['sequence'][0],
                           files['taxonomy'][0], files['tree'][0])

    _insert_processed_params(conn, ref)
Example #8
def snapshot(source, destination, name=None):
    """Snapshot one directory to another. Specify names to snapshot small, named differences."""
    source = source + sep
    destination = destination + sep

    if not path.isdir(source):
        raise RuntimeError("source is not a directory")

    if path.exists(destination):
        if not path.isdir(destination):
            raise RuntimeError("destination is not a directory")

        if name is None:
            raise RuntimeError("can't snapshot base snapshot if destination exists")

    snapdir = path.join(destination, ".snapdir")
    
    if path.exists(path.join(source, ".snapdir")):
        raise RuntimeError("snapdir exists in source directory")

    if name is None:
        check_call(["rsync", "--del", "-av", source, destination])
        makedirs(snapdir)
    else:
        if not path.exists(snapdir):
            raise RuntimeError("No snapdir in destination directory")

        check_call(["rsync", "--del", "-av", "--only-write-batch={}".format(path.join(snapdir, name)), source, destination])
Example #9
    def checkForSameFiles(self, starting=False):
        """ checks if same file was/is downloaded within same package

        :param starting: indicates that the current download is going to start
        :raises SkipDownload:
        """

        pack = self.pyfile.package()

        for pyfile in self.core.files.cache.values():
            if pyfile != self.pyfile and pyfile.name == self.pyfile.name and pyfile.package().folder == pack.folder:
                if pyfile.status in (0, 12): #finished or downloading
                    raise SkipDownload(pyfile.pluginname)
                elif pyfile.status in (5, 7) and starting: # a download is waiting/starting and was apparently started before
                    raise SkipDownload(pyfile.pluginname)

        download_folder = self.config['general']['download_folder']
        location = save_join(download_folder, pack.folder, self.pyfile.name)

        if starting and self.core.config['download']['skip_existing'] and exists(location):
            size = os.stat(location).st_size
            if size >= self.pyfile.size:
                raise SkipDownload("File exists.")

        pyfile = self.core.db.findDuplicates(self.pyfile.id, self.pyfile.package().folder, self.pyfile.name)
        if pyfile:
            if exists(location):
                raise SkipDownload(pyfile[0])

            self.log.debug("File %s not skipped, because it does not exists." % self.pyfile.name)
Example #10
    def test_arguments(self):
        # only pass one argument. spider script shouldn't be created
        self.assertEqual(2, self.call('genspider', 'test_name'))
        assert not exists(join(self.proj_mod_path, 'spiders', 'test_name.py'))
        # pass two arguments <name> <domain>. spider script should be created
        self.assertEqual(0, self.call('genspider', 'test_name', 'test.com'))
        assert exists(join(self.proj_mod_path, 'spiders', 'test_name.py'))
Example #11
def find_module(mod):
    '''find the .ko file for kernel module named mod.
    Searches the $RTE_SDK/$RTE_TARGET directory, the kernel
    modules directory and finally under the parent directory of
    the script '''
    # check $RTE_SDK/$RTE_TARGET directory
    if 'RTE_SDK' in os.environ and 'RTE_TARGET' in os.environ:
        path = "%s/%s/kmod/%s.ko" % (os.environ['RTE_SDK'],\
                                     os.environ['RTE_TARGET'], mod)
        if exists(path):
            return path

    # check using depmod
    try:
        depmod_out = check_output(["modinfo", "-n", mod], \
                                  stderr=subprocess.STDOUT).lower()
        if "error" not in depmod_out:
            path = depmod_out.strip()
            if exists(path):
                return path
    except subprocess.CalledProcessError: # modinfo fails if it can't find the module, so continue
        pass

    # check for a copy based off current path
    tools_dir = dirname(abspath(sys.argv[0]))
    if (tools_dir.endswith("tools")):
        base_dir = dirname(tools_dir)
        find_out = check_output(["find", base_dir, "-name", mod + ".ko"])
        if len(find_out) > 0: #something matched
            path = find_out.splitlines()[0]
            if exists(path):
                return path
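A minimal usage sketch; 'igb_uio' is just an illustrative DPDK module name:

ko_path = find_module('igb_uio')
if ko_path:
    print('kernel module found at %s' % ko_path)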
Example #12
def start():
    """Start the CherryPy application server."""

    setupdir = dirname(dirname(__file__))
    curdir = os.getcwd()

    # First look on the command line for a desired config file,
    # if it's not on the command line, then look for 'setup.py'
    # in the current directory. If there, load configuration
    # from a file called 'dev.cfg'. If it's not there, the project
    # is probably installed and we'll look first for a file called
    # 'prod.cfg' in the current directory and then for a default
    # config file called 'default.cfg' packaged in the egg.
    if len(sys.argv) > 1:
        configfile = sys.argv[1]
    elif exists(join(setupdir, "setup.py")):
        configfile = join(setupdir, "dev.cfg")
    elif exists(join(curdir, "prod.cfg")):
        configfile = join(curdir, "prod.cfg")
    else:
        try:
            configfile = pkg_resources.resource_filename(
                pkg_resources.Requirement.parse("gordonweb"),
                "config/default.cfg")
        except pkg_resources.DistributionNotFound:
            raise ConfigurationError("Could not find default configuration.")

    turbogears.update_config(configfile=configfile,
        modulename="gordonweb.config")

    from gordonweb.controllers import Root

    turbogears.start_server(Root())
Example #13
def checkifMCNPproject(directory, r):
    # both the "cards" and "geom" subdirectories must exist for a valid project
    if not path.exists(path.join(directory, "cards")) or not path.exists(path.join(directory, "geom")):
        print "\n\033[1;34mMCNPmanager error:\033[1;32m %s contains no MCNPmanager project\033[0m\n" % (directory)
        return r
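A hedged usage sketch; the project path is hypothetical and r is whatever error marker the caller wants echoed back on failure:

err = checkifMCNPproject('/home/user/mcnp_project', 1)
if err:
    print 'not a valid MCNPmanager project'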
Example #14
def find_pylintrc():
    """search the pylint rc file and return its path if it find it, else None
    """
    # is there a pylint rc file in the current directory ?
    if exists("pylintrc"):
        return abspath("pylintrc")
    if isfile("__init__.py"):
        curdir = abspath(os.getcwd())
        while isfile(join(curdir, "__init__.py")):
            curdir = abspath(join(curdir, ".."))
            if isfile(join(curdir, "pylintrc")):
                return join(curdir, "pylintrc")
    if "PYLINTRC" in os.environ and exists(os.environ["PYLINTRC"]):
        pylintrc = os.environ["PYLINTRC"]
    else:
        user_home = expanduser("~")
        if user_home == "~" or user_home == "/root":
            pylintrc = ".pylintrc"
        else:
            pylintrc = join(user_home, ".pylintrc")
            if not isfile(pylintrc):
                pylintrc = join(user_home, ".config", "pylintrc")
    if not isfile(pylintrc):
        if isfile("/etc/pylintrc"):
            pylintrc = "/etc/pylintrc"
        else:
            pylintrc = None
    return pylintrc
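A minimal usage sketch for the lookup above:

rcfile = find_pylintrc()
print(rcfile if rcfile else 'no pylintrc found')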
Example #15
def git_checkout(repo_path, checkout_path, ref):
    ''' Check out a git repository to a given reference and path.
        
        This function is assumed to be run in a lock.
    '''
    jlogger.info('Checking out to ' + checkout_path)

    if not exists(checkout_path):
        mkdir(checkout_path)
    
    hash_file = checkout_path + '.commit-hash'
    commit_hash = get_ref_sha(repo_path, ref)
    
    do_checkout = True
    
    if exists(hash_file):
        previous_hash = open(hash_file).read().strip()
        
        if previous_hash == commit_hash:
            jlogger.debug('Skipping checkout to '+checkout_path)
            do_checkout = False

    if do_checkout:
        run_cmd(('git', '--work-tree='+checkout_path, 'checkout', ref, '--', '.'), repo_path)
    
    touch(checkout_path)
    
    with open(hash_file, 'w') as file:
        print >> file, commit_hash
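A hedged usage sketch; the bare repository, checkout path, and ref are all hypothetical, and git must be available:

git_checkout('/srv/repos/site.git', '/srv/checkouts/site-master', 'master')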
Example #16
def copy_packages(packages_names, dest, create_links=False, extra_ignores=None):
    """Copy python packages ``packages_names`` to ``dest``, spurious data.

    Copy will happen without tests, testdata, mercurial data or C extension module source with it.
    ``py2app`` include and exclude rules are **quite** funky, and doing this is the only reliable
    way to make sure we don't end up with useless stuff in our app.
    """
    if ISWINDOWS:
        create_links = False
    if not extra_ignores:
        extra_ignores = []
    ignore = shutil.ignore_patterns('.hg*', 'tests', 'testdata', 'modules', 'docs', 'locale', *extra_ignores)
    for package_name in packages_names:
        if op.exists(package_name):
            source_path = package_name
        else:
            mod = __import__(package_name)
            source_path = mod.__file__
            if mod.__file__.endswith('__init__.py'):
                source_path = op.dirname(source_path)
        dest_name = op.basename(source_path)
        dest_path = op.join(dest, dest_name)
        if op.exists(dest_path):
            if op.islink(dest_path):
                os.unlink(dest_path)
            else:
                shutil.rmtree(dest_path)
        print("Copying package at {0} to {1}".format(source_path, dest_path))
        if create_links:
            os.symlink(op.abspath(source_path), dest_path)
        else:
            if op.isdir(source_path):
                shutil.copytree(source_path, dest_path, ignore=ignore)
            else:
                shutil.copy(source_path, dest_path)
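A hedged usage sketch; it copies the stdlib json package into a hypothetical build/ directory purely as an illustration:

copy_packages(['json'], 'build')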
Example #17
    def test_write_file(self):
        sys.argv = [sys.argv[0], test_md]
        target_file = path.join(curdir, 'test.rst')
        # remove any previous output so the test starts from a clean slate
        if path.exists(target_file):
            os.remove(target_file)
        self.assertFalse(path.exists(target_file))
        main()
        self.assertTrue(path.exists(target_file))
Example #18
	def go(self):
		sel = self["list"].getCurrent()
		if sel is None:
			return
		if len(sel[0]) <= 1:
			return

		selectedhost = sel[0][2]
		selectedhostname = sel[0][1]

		self.hostcache_file = '/etc/enigma2/' + selectedhostname.strip() + '.cache' # Path to the host's cache file
		if os_path.exists(self.hostcache_file):
			try:
				self.hostdata = load_cache(self.hostcache_file)
			except Exception:
				print 'load cache failed'

		if sel[0][0] == 'host': # host entry selected
			if selectedhost in self.expanded:
				self.expanded.remove(selectedhost)
			else:
				self.expanded.append(selectedhost)
			self.updateNetworkList()
		elif sel[0][0] == 'nfsShare': # share entry selected
			self.openMountEdit(sel[0])
		elif sel[0][0] == 'smbShare': # share entry selected
			if os_path.exists(self.hostcache_file):
				self.openMountEdit(sel[0])
			else:
				self.session.openWithCallback(self.passwordQuestion, MessageBox, (_("Do you want to enter a username and password for this host?") ) )
Example #19
def _get_path(path, key, name):
    """Helper to get a dataset path"""
    if path is None:
        # use an intelligent guess if it's not defined
        def_path = op.realpath(op.join(op.dirname(__file__), '..', '..',
                                       'examples'))
        if get_config(key) is None:
            key = 'MNE_DATA'
        path = get_config(key, def_path)

        # use the same for all datasets
        if not op.exists(path) or not os.access(path, os.W_OK):
            try:
                os.mkdir(path)
            except OSError:
                try:
                    logger.info('Checking for %s data in '
                                '"~/mne_data"...' % name)
                    path = op.join(op.expanduser("~"), "mne_data")
                    if not op.exists(path):
                        logger.info("Trying to create "
                                    "'~/mne_data' in home directory")
                        os.mkdir(path)
                except OSError:
                    raise OSError("User does not have write permissions "
                                  "at '%s', try giving the path as an "
                                  "argument to data_path() where user has "
                                  "write permissions, for ex:data_path"
                                  "('/home/xyz/me2/')" % (path))
    if not isinstance(path, string_types):
        raise ValueError('path must be a string or None')
    return path
Example #20
def install():
    ORIGIN=abspath(dirname(__file__))
    for dname in os.listdir(ORIGIN):
        if dname.startswith('.'):
            continue

        if not isdir(join(ORIGIN, dname)):
            continue

        with open(join(ORIGIN, dname, '.target'), 'r') as f:
            target_dir = f.read().strip()

        if not exists(target_dir):
            os.makedirs(target_dir)

        for phile in os.listdir(join(ORIGIN, dname)):
            if phile.startswith('.'):
                continue

            path_from = join(ORIGIN, dname, phile)

            if phile.startswith('_'):
                phile = '.%s' % phile[1:]

            path_to = join(target_dir, phile)
            if islink(path_to):
                continue

            if exists(path_to):
                os.rename(path_to, "%s_" % path_to)

            os.symlink(path_from, path_to)
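A hedged sketch of the directory layout the installer above expects; every name below is illustrative:

# dotfiles/
#   shell/.target   -- contains the target directory, e.g. /home/user
#   shell/_bashrc   -- symlinked to /home/user/.bashrc (leading '_' maps to '.')
#   editor/.target  -- contains e.g. /home/user
#   editor/_vimrc   -- symlinked to /home/user/.vimrc
if __name__ == '__main__':
    install()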
Example #21
    def getValue(self):
        service = self.source.service
        info = service and service.info()
        if not info:
            return -1
        elif self.type == self.XRES:
            video_width = None
            if path.exists('/proc/stb/vmpeg/0/xres'):
                with open('/proc/stb/vmpeg/0/xres', 'r') as f:
                    video_width = int(f.read(), 16)
            if not video_width:
                video_width = info.getInfo(iServiceInformation.sVideoWidth)
            return str(video_width)
        elif self.type == self.YRES:
            video_height = None
            if path.exists('/proc/stb/vmpeg/0/yres'):
                with open('/proc/stb/vmpeg/0/yres', 'r') as f:
                    video_height = int(f.read(), 16)
            if not video_height:
                video_height = info.getInfo(iServiceInformation.sVideoHeight)
            return str(video_height)
        elif self.type == self.FRAMERATE:
            video_rate = None
            if path.exists('/proc/stb/vmpeg/0/framerate'):
                with open('/proc/stb/vmpeg/0/framerate', 'r') as f:
                    video_rate = f.read()
            if not video_rate:
                video_rate = info.getInfo(iServiceInformation.sFrameRate)
            return str(video_rate)
        else:
            return -1
Example #22
def resync_engine_file():
    user_config = path.join(get_userhome(), '.config',
                            'ibus-anthy', 'engines.xml')
    system_config = path.join(config.PKGDATADIR, 'engine', 'default.xml')
    if not path.exists(user_config):
        return
    if not path.exists(system_config):
        os.unlink(user_config)
        return

    # path.getmtime depends on the build time rather than install time.
    def __get_engine_file_version(engine_file):
        version_str = ''
        dom = xml.dom.minidom.parse(engine_file)
        elements = dom.getElementsByTagName('version')
        nodes = []
        if len(elements) > 0:
            nodes = elements[0].childNodes
        if len(nodes) > 0:
            version_str = nodes[0].data
        if version_str != '':
            version_str = version_str.strip()
        return version_str

    user_config_version = __get_engine_file_version(user_config)
    system_config_version = __get_engine_file_version(system_config)
    if system_config_version > user_config_version:
        import shutil
        shutil.copyfile(system_config, user_config)
Example #23
    def ensure_bootstrapped(self, target=None):
        if self.context.bootstrapped:
            return

        rust_root = self.config["tools"]["rust-root"]
        rustc_path = path.join(
            rust_root, "rustc", "bin", "rustc" + BIN_SUFFIX
        )
        rustc_binary_exists = path.exists(rustc_path)

        base_target_path = path.join(rust_root, "rustc", "lib", "rustlib")
        target_exists = True
        if target is not None:
            target_path = path.join(base_target_path, target)
            target_exists = path.exists(target_path)

        if not (self.config['tools']['system-rust'] or (rustc_binary_exists and target_exists)):
            print("looking for rustc at %s" % (rustc_path))
            Registrar.dispatch("bootstrap-rust", context=self.context, target=filter(None, [target]))

        cargo_path = path.join(self.config["tools"]["cargo-root"], "cargo", "bin",
                               "cargo" + BIN_SUFFIX)
        cargo_binary_exists = path.exists(cargo_path)

        if not self.config["tools"]["system-cargo"] and not cargo_binary_exists:
            Registrar.dispatch("bootstrap-cargo", context=self.context)

        self.context.bootstrapped = True
Example #24
    def test_ampliconnoise_install(self):
        """ AmpliconNoise install looks sane."""
        url = "http://qiime.org/install/install.html#ampliconnoise-install-notes"

        pyro_lookup_file = getenv('PYRO_LOOKUP_FILE')
        self.assertTrue(pyro_lookup_file is not None,
                        "$PYRO_LOOKUP_FILE variable is not set. See %s for help." % url)
        self.assertTrue(exists(pyro_lookup_file),
                        "$PYRO_LOOKUP_FILE variable is not set to an existing filepath.")

        seq_lookup_file = getenv('SEQ_LOOKUP_FILE')
        self.assertTrue(seq_lookup_file is not None,
                        "$SEQ_LOOKUP_FILE variable is not set. See %s for help." % url)
        self.assertTrue(exists(seq_lookup_file),
                        "$SEQ_LOOKUP_FILE variable is not set to an existing filepath.")

        self.assertTrue(which("SplitKeys.pl"),
                        "Couldn't find SplitKeys.pl. " +
                        "Perhaps AmpliconNoise Scripts directory isn't in $PATH?" +
                        " See %s for help." % url)

        self.assertTrue(which("FCluster"),
                        "Couldn't find FCluster. " +
                        "Perhaps the AmpliconNoise bin directory isn't in $PATH?" +
                        " See %s for help." % url)

        self.assertTrue(which("Perseus"),
                        "Couldn't find Perseus. " +
                        "Perhaps the AmpliconNoise bin directory isn't in $PATH?" +
                        " See %s for help." % url)
Example #25
    def processMovieStep(self, movieId, movieFn, *args):
        movieFolder = self._getMovieFolder(movieId)
        movieName = basename(movieFn)

        if self._filterMovie(movieId, movieFn):
            makePath(movieFolder)
            createLink(movieFn, join(movieFolder, movieName))
            toDelete = [movieName]
    
            if movieName.endswith('bz2'):
                # We assume that if compressed the name ends with .mrc.bz2
                movieMrc = movieName.replace('.bz2', '')
                toDelete.append(movieMrc)
                if not exists(movieMrc):
                    self.runJob('bzip2', '-d -f %s' % movieName, cwd=movieFolder)

            elif movieName.endswith('tbz'):
                # We assume that if compressed the name ends with .tbz
                movieMrc = movieName.replace('.tbz', '.mrc')
                toDelete.append(movieMrc)
                if not exists(movieMrc):
                    self.runJob('tar', 'jxf %s' % movieName, cwd=movieFolder)
            else:
                movieMrc = movieName
            
            self.info("Processing movie: %s" % movieMrc)
            
            self._processMovie(movieId, movieMrc, movieFolder, *args)
            
            if self.cleanMovieData:
                print "erasing.....movieFolder: ", movieFolder
                os.system('rm -rf %s' % movieFolder)
#                 cleanPath(movieFolder)
            else:
                self.info('Clean movie data DISABLED. Movie folder will remain on disk!!!')
Example #26
        def _xcassets(self, title, command):
            parser = argparse.ArgumentParser(
                    description="Generate {} for your project".format(title))
            parser.add_argument("filename", help="Path to your project or xcodeproj")
            parser.add_argument("image", help="Path to your initial {}.png".format(title.lower()))
            args = parser.parse_args(sys.argv[2:])

            if not exists(args.image):
                print("ERROR: image path does not exists.")
                return

            filename = args.filename
            if not filename.endswith(".xcodeproj"):
                # try to find the xcodeproj
                from glob import glob
                xcodeproj = glob(join(filename, "*.xcodeproj"))
                if not xcodeproj:
                    print("ERROR: Unable to find a xcodeproj in {}".format(filename))
                    sys.exit(1)
                filename = xcodeproj[0]

            project_name = filename.split("/")[-1].replace(".xcodeproj", "")
            images_xcassets = realpath(join(filename, "..", project_name,
                "Images.xcassets"))
            if not exists(images_xcassets):
                print("WARNING: Images.xcassets not found, creating it.")
                makedirs(images_xcassets)
            print("Images.xcassets located at {}".format(images_xcassets))

            command(images_xcassets, args.image)
Example #27
def main(sudo, pre=False, repos=None):
    photon = Photon(
        dict(sudo=sudo, repos=repos),
        config=None,
        meta='photon_selfupgrade.json'
    )
    settings = photon.settings.get
    if settings['repos']:
        for repo in settings['repos']:
            if path.exists(repo) and path.exists(path.join(repo, '.git')):
                photon.git_handler(repo)._pull()
            else:
                photon.m('skipping non repo', more=dict(repo=repo))

    upres = photon.m(
        'attempting selfupgrade',
        cmdd=dict(
            cmd='%s pip3 install -U %s photon_core' % (
                'sudo' if sudo else '',
                '--pre' if pre else ''
            )
        ),
        more=dict(sudo=sudo),
        critical=False
    )
    if upres.get('returncode') == 0:
        photon.m('all went well')
    else:
        photon.m('I am dead! ' * 23, state=True)
Example #28
def map_reads_indels(reference):
    """
    :return: check unmapped reads for indels
    """
    if exists(INDEL_file_path):
        logger.info("Loading INDELs from {}".format(INDEL_file_path))
        indels = cPickle.load(open(INDEL_file_path, "rb"))
        return indels

    if exists(unmapped_read_file_path):
        logger.info("Loading Unmapped Reads from {}".format(unmapped_read_file_path))
        unmapped_reads = cPickle.load(open(unmapped_read_file_path, "rb"))
        logger.info("Checking {} unmapped reads for insertions / deletions".format(len(unmapped_reads)))
        start = time.clock()

        for count, unmapped in enumerate(unmapped_reads):
            if count % 1000 == 0 and count != 0:
                logger.info("Mapping reads to reference. Completed {} %".format(
                    str(100 * count / float(len(unmapped_reads)))[:5]))
                logger.info("Reads remaining : {0}".format(str(len(unmapped_reads) - count)))

            check_for_indels(unmapped[0], reference, unmapped[1], unmapped[2])

        ins = get_insertions()
        DEL = get_deletions()

        logger.info("Dumping INDELs at {}".format(INDEL_file_path))
        cPickle.dump((ins, DEL), open(INDEL_file_path, "wb"))

        return ins, DEL
    else:
        logger.info("Run STR Code first to get unmapped reads")
        exit()
Example #29
def test(ctx, projects):
    """
    Test the source distribution
    """
    if not exists(dist_dir):
        ctx.invoke(sdist)

    with empty_pythonpath():
        print("Creating virtualenv for test install...")
        if exists(test_venv_dir):
            shutil.rmtree(test_venv_dir)
        subprocess.check_call(["virtualenv", "-q", test_venv_dir])

        pip = join(test_venv_dir, venv_bin, "pip")
        with chdir(dist_dir):
            for project in projects:
                print("Installing %s..." % project)
                dist = glob.glob("./%s*" % project)[0]
                subprocess.check_call([pip, "install", "-q", dist])

            print("Running binaries...")
            for project in projects:
                for tool in tools[project]:
                    tool = join(test_venv_dir, venv_bin, tool)
                    print(tool)
                    print(subprocess.check_output([tool, "--version"]))

            print("Virtualenv available for further testing:")
            print("source %s" % normpath(join(test_venv_dir, venv_bin, "activate")))
Example #30
	def __init__(self, session, parent):
		Screen.__init__(self, session, parent=parent)
		self["selected"] = StaticText("ViX:" + getImageVersion())

		AboutText = _("Model: %s %s\n") % (getMachineBrand(), getMachineName())

		if path.exists('/proc/stb/info/chipset'):
			chipset = open('/proc/stb/info/chipset', 'r').read()
			AboutText += _("Chipset: BCM%s") % chipset.replace('\n', '') + "\n"

		AboutText += _("Version: %s") % getImageVersion() + "\n"
		AboutText += _("Build: %s") % getImageBuild() + "\n"
		AboutText += _("Kernel: %s") % about.getKernelVersionString() + "\n"

		string = getDriverDate()
		year = string[0:4]
		month = string[4:6]
		day = string[6:8]
		driversdate = '-'.join((year, month, day))
		AboutText += _("Drivers: %s") % driversdate + "\n"
		AboutText += _("Last update: %s") % getEnigmaVersionString() + "\n\n"

		tempinfo = ""
		if path.exists('/proc/stb/sensors/temp0/value'):
			tempinfo = open('/proc/stb/sensors/temp0/value', 'r').read()
		elif path.exists('/proc/stb/fp/temp_sensor'):
			tempinfo = open('/proc/stb/fp/temp_sensor', 'r').read()
		if tempinfo and int(tempinfo.replace('\n', '')) > 0:
			mark = str('\xc2\xb0')
			AboutText += _("System temperature: %s") % tempinfo.replace('\n', '') + mark + "C\n\n"

		self["AboutText"] = StaticText(AboutText)
Example #31
# -*- coding: utf-8 -*-
# @Time    :  2019/7/19 11:03
# @Author  : songxy
# @Email   : [email protected]
# @File    : 4.os模块.py

from os import path

p = path.join('D:/', 'svn', 'ResourceManage', '00密码管理')

p = path.abspath(__file__)

print(p, type(p))

print(path.exists(p))
print(path.abspath(p))

print(path.split(path.abspath(p)))

print(path.dirname(path.abspath(p)))
print(path.basename(path.abspath(p)))
Example #32
def answer():
    if request.method == 'GET':
        records = getrecords()
        selected = {}
        for ns, l in records.items():  # iterate over filenumbers
            if ns in selected or 'd' in l:  #   and filename suffix [l]ist
                continue  # already did this filenumber
            elif 'a' not in l:
                selected[ns] = 'a'  # question needs an answer
            elif not ('e' in l or 'o' in l):
                selected[ns] = 'r'  # answer needs review
            elif 'o' in l and 't' not in l:
                selected[ns] = 't'  # opposing review needs tie-breaker
        if len(selected) < 1:
            flash('No open questions remaining to answer or review.')
            return redirect(url_for('index'))
        chosen = choice(list(selected.keys()))  # pick a question at random
        needs = selected[chosen]  # type of response needed
        files = {}  # files' contents in a suffix
        for suffix in records[chosen]:  # iterate over the files available
            with open(recdir + chosen + suffix, 'r') as f:
                files[suffix] = sub(r'--REGISTRATION-ID:.*--$', '',
                                    f.read())  # read textual contents of each
        logdate = datetime.strftime(date.today(), '%Y-%m-%d')
        logfn = './logs/activity-' + logdate
        user = 'Anonymous'  # Username is Anonymous by default
        if 'token' in session:
            token = session['token']
            tokenfilename = 'registered/' + token
            with open(
                    tokenfilename, 'r'
            ) as f:  # for getting username associated with the set token
                user = f.readline()[:-1]
        with open(
                logfn, 'a'
        ) as f:  # logging username, IP addr, end-point, request type, question number, required answer
            log = user + ' ' + request.environ[
                'REMOTE_ADDR'] + ' answer' + ' GET' + ' ' + chosen + ' ' + needs + '\n'
            f.write(log)
        return render_template('answer.html',
                               record=chosen,
                               response=needs,
                               files=files)  # invoke the template
    elif request.method == 'POST':
        record = request.form['record']  # file number with zeroes
        response = request.form[
            'response']  # [submit button] 1 of: a,e,o,te,to
        answer = linkandescape(request.form['answer'])
        answer = answer + frameurl(request.form['iframeurl'])
        ### sanity-check size
        if response in ['te', 'to']:  # tie breaker
            if path.exists(recdir + record + 't'):
                flash('Someone else just submitted that tiebreaker.')
                return redirect(url_for('index'))
                ###@@@ SLOW RACE: see below
            rename(recdir + record + 'o', recdir + record + 't')
            response = response[1]  # 2nd character
        fn = recdir + record + response  # filename ### sanity check?
        if path.exists(fn):  # does file exist?
            flash('Someone else just submitted the requested response.')
            return redirect(url_for('index'))
            ###@@@ SLOW RACE: lock choice() responses below for some time?
        with open(fn, 'w') as f:
            f.write(answer + '\n')  ### only add \n if not already at end?
            if 'token' in session:
                f.write('--REGISTRATION-ID:' + session['token'] + '--')
        logdate = datetime.strftime(date.today(), '%Y-%m-%d')
        logfn = './logs/activity-' + logdate
        user = 'Anonymous'  # Username is Anonymous by default
        if 'token' in session:
            token = session['token']
            tokenfilename = 'registered/' + token
            with open(
                    tokenfilename, 'r'
            ) as f:  # for getting username associated with the set token
                user = f.readline()[:-1]
        with open(
                logfn, 'a'
        ) as f:  # logging username, IP addr, end-point, request type, question number, given answer
            log = user + ' ' + request.environ[
                'REMOTE_ADDR'] + ' answer' + ' POST' + ' ' + record + ' ' + response + '\n'
            f.write(log)
        if 'token' in session:
            userfile = 'registered/' + session['token']
            if path.exists(fn):
                with open(userfile, 'a') as f:
                    f.write(fn[8:] + '\n')
        flash('Thank you for your response.')  # displays in layout.html
        return redirect(url_for('index'))
Example #33
q2_seq = token.texts_to_sequences(q2_l)
word_indx = token.word_index
dict_word_indx = word_indx
with open(path + 'word_indx.json', 'w') as dfile:
    json.dump(dict_word_indx, dfile)

#download GLOVE embeddings:
# Citations: Stackoverflow.com,'http://nlp.stanford.edu/data/glove.840B.300d.zip'
"""
i/p: GloVe
o/p: embeddings_index dict
"""

GLOVE_URL = 'http://nlp.stanford.edu/data/glove.840B.300d.zip'

if not exists(expanduser('~/.keras/datasets/glove.840B.300d.zip')):
    zipfile = ZipFile(get_file('glove.840B.300d.zip', GLOVE_URL))
    zipfile.extract('glove.840B.300d.txt',
                    path=expanduser('~/.keras/datasets/'))

print("Processing", 'glove.840B.300d.txt')

embed_indx = {}

with open(expanduser('~/.keras/datasets/glove.840B.300d.txt'),
          encoding='utf-8') as filee:
    for l in filee:
        val = l.split(' ')
        w = val[0]
        embed = np.asarray(val[1:], dtype='float32')
        embed_indx[w] = embed
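A hedged follow-up sketch: align the GloVe vectors with the tokenizer's word index to build an embedding matrix suitable for a Keras Embedding layer (the 300 matches glove.840B.300d; everything else is an assumption):

import numpy as np

EMBED_DIM = 300
embed_matrix = np.zeros((len(word_indx) + 1, EMBED_DIM), dtype='float32')
for w, i in word_indx.items():
    vec = embed_indx.get(w)
    if vec is not None and len(vec) == EMBED_DIM:
        embed_matrix[i] = vec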
Example #34
from marker_detection.msg import pre_detection as pre_det_msg
from marker_detection.msg import marker_detection_result as md_result
from marker_detection.msg import bbox as bbox_msg



ros_root = os.getcwd()
FRCN_root = ros_root + "/../"

CLASSES = ('__background__',
           'marker')


# Re-make a folder for saved images
saved_img_path = ros_root + "/src/marker_detection/detected_img/"
if osp.exists(saved_img_path):
    shutil.rmtree(saved_img_path)
    os.makedirs(saved_img_path)
else:
    os.makedirs(saved_img_path)


def vis_detections(im, class_name, dets, thresh=0.5):
    """Draw detected bounding boxes."""
    inds = np.where(dets[:, -1] >= thresh)[0]

    # Case that nothing detected
    num_saved_img = len(os.listdir(saved_img_path))
    if len(inds) == 0:
        cv2.imwrite(saved_img_path + str(num_saved_img) + ".png", im)
        # publish image message
Example #35
    def _proxy_exists(self, path):
        # TODO: decide whether it should be retrieved right away.
        # For now, a (possibly broken) symlink pointing under .git/annex
        # counts as existing.
        if exists(path):
            return True
        return lexists(path) and 'annex/objects' in str(realpath(path))
Example #36
data_path = sample.data_path()
event_id = {'Auditory/Left': 1, 'Auditory/Right': 2, 'Visual/Left': 3,
            'Visual/Right': 4, 'Smiley': 5, 'Button': 32}

raw_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw.fif')
events_data = op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw-eve.fif')
output_path = op.join(data_path, '..', 'MNE-sample-data-bids')

# %%
# To ensure the output path doesn't contain any leftover files from previous
# tests and example runs, we simply delete it.
#
# .. warning:: Do not delete directories that may contain important data!
#

if op.exists(output_path):
    shutil.rmtree(output_path)

# %%
#
# .. note::
#
#   ``mne-bids`` will try to infer as much information from the data as
#   possible to then save this data in BIDS-specific "sidecar" files. For
#   example the manufacturer information, which is inferred from the data file
#   extension. However, sometimes inferring is ambiguous (e.g., if your file
#   format is non-standard for the manufacturer). In these cases, MNE-BIDS does
#   *not* guess and you will have to update your BIDS fields manually.
#
# Based on our path definitions above, we read the raw data file, define
# a new BIDS name for it, and then run the automatic BIDS conversion.
Example #37
    def save_html(self, filepath=None, overwrite=False, js_source='web',
                  protocol='https', menu='all', scroll_behavior='pan',
                  enable_editing=True, enable_keys=True, minified_js=True,
                  never_ask_before_quit=False, static_site_index_json=None):
        """Save an HTML file containing the map.

        :param string filepath:

            The HTML file will be saved to this location. When js_source is
            'local', then a new directory will be created with this name.

        :param Boolean overwrite:

            Overwrite existing files.

        :param string js_source:

            Can be one of the following:

            - *web* (Default) - Use JavaScript files from escher.github.io.
            - *local* - Use compiled JavaScript files in the local Escher
                        installation. Works offline. To make the dependencies
                        available to the downloaded file, a new directory will
                        be made with the name specified by filepath.
            - *dev* - No longer necessary with source maps. This now gives the
                      same behavior as 'local'.

        :param string protocol:

            The protocol can be 'http', 'https', or None which indicates a
            'protocol relative URL', as in //escher.github.io. Ignored if source
            is local.

        :param string menu: Menu bar options include:

            - *none* - No menu or buttons.
            - *zoom* - Just zoom buttons.
            - *all* (Default) - Menu and button bar (requires Bootstrap).

        :param string scroll_behavior: Scroll behavior options:

            - *pan* - Pan the map.
            - *zoom* - Zoom the map.
            - *none* (Default) - No scroll events.

        :param Boolean enable_editing: Enable the map editing modes.

        :param Boolean enable_keys: Enable keyboard shortcuts.

        :param Boolean minified_js:

            If True, use the minified version of JavaScript and CSS files.

        :param Boolean never_ask_before_quit:

            Never display an alert asking if you want to leave the page. By
            default, this message is displayed if enable_editing is True.

        :param string static_site_index_json:

            The index, as a JSON string, for the static site. Use javascript
            to parse the URL options. Used for generating static pages (see
            static_site.py).

        """

        filepath = expanduser(filepath)

        if js_source in ['local', 'dev']:
            if filepath is None:
                raise Exception('Must provide a filepath when js_source is not "web"')

            # make a directory
            directory = re.sub(r'\.html$', '', filepath)
            if exists(directory):
                if not overwrite:
                    raise Exception('Directory already exists: %s' % directory)
            else:
                os.makedirs(directory)
            # add dependencies to the directory
            escher = get_url('escher_min' if minified_js else 'escher', 'local')
            builder_css = get_url('builder_css_min' if minified_js else 'builder_css', 'local')
            d3 = get_url('d3', 'local')
            favicon = get_url('favicon', 'local')
            if menu == 'all':
                boot_css = get_url('boot_css', 'local')
                boot_js = get_url('boot_js', 'local')
                jquery = get_url('jquery', 'local')
                boot_font_eot = get_url('boot_font_eot', 'local')
                boot_font_svg = get_url('boot_font_svg', 'local')
                boot_font_woff = get_url('boot_font_woff', 'local')
                boot_font_woff2 = get_url('boot_font_woff2', 'local')
                boot_font_ttf = get_url('boot_font_ttf', 'local')
            else:
                boot_css = boot_js = jquery = None
                boot_font_eot = boot_font_svg = None
                boot_font_woff = boot_font_woff2 = boot_font_ttf = None

            for path in [escher, builder_css, boot_css, boot_js, jquery, d3,
                         favicon, boot_font_eot, boot_font_svg, boot_font_woff,
                         boot_font_woff2, boot_font_ttf]:
                if path is None:
                    continue
                src = join(root_directory, path)
                dest = join(directory, path)
                dest_dir = dirname(dest)
                if not exists(dest_dir):
                    os.makedirs(dest_dir)
                shutil.copy(src, dest)
            filepath = join(directory, 'index.html')
        else:
            if not filepath.endswith('.html'):
                filepath += '.html'
            if exists(filepath) and not overwrite:
                raise Exception('File already exists: %s' % filepath)

        html = self._get_html(js_source=js_source, menu=menu,
                              scroll_behavior=scroll_behavior,
                              html_wrapper=True, enable_editing=enable_editing,
                              enable_keys=enable_keys, minified_js=minified_js,
                              fill_screen=True, height="100%",
                              never_ask_before_quit=never_ask_before_quit,
                              static_site_index_json=static_site_index_json,
                              protocol=protocol)
        if filepath is not None:
            with open(filepath, 'wb') as f:
                f.write(html.encode('utf-8'))
            return filepath
        else:
            from tempfile import mkstemp
            from os import write, close
            os_file, filename = mkstemp(suffix=".html", text=False) # binary
            write(os_file, html.encode('utf-8'))
            close(os_file)
            return filename
Example #38
    help="Number of Frames per second.",
    default=15,
)
parser.add_argument(
    "-o",
    "--output",
    dest="output_path",
    type=str,
    help="Output path for the resultant sildeshow.",
    default="output.mp4",
)

args = parser.parse_args()
flag = 1

if not path.exists(args.path_to_images):
    flag = "Invalid path: " + args.path_to_images
if not path.isfile(args.path_to_music):
    flag = "Invalid path: " + args.path_to_music
if args.transition_type not in allowed_transition_types:
    print("Invalid transition type.")
    print(
        "Following are the list of allowed transition types:",
        *allowed_transition_types,
        sep="\n\t\t"
    )
    print("\n\nFor more clarity visit: https://trac.ffmpeg.org/wiki/Xfade\n\n")
    exit(2)
try:
    if args.path_to_music[-3:] not in allowed_sound_formats:
        print("Music file format invalid.")
Example #39
def initialize_VPN(stored_settings=0,save=0,area_input=None):

    ###load stored settings if needed and set input_needed variables to zero if settings are provided###
    additional_settings_needed = 1
    additional_settings_list = list()
    if stored_settings == 1:
        instructions = saved_settings_check()
        additional_settings_needed = 0
        input_needed = 0
    elif area_input is not None:
        input_needed = 2
    else:
        input_needed = 1


    ###performing system check###
    opsys = platform.system()

    ##windows##
    if opsys == "Windows":
        print("\33[33mYou're using Windows.\n"
              "Performing system check...\n"
              "###########################\n\33[0m")
        #seek and set windows installation path#
        option_1_path = 'C:/Program Files/NordVPN'
        option_2_path = 'C:/Program Files (x86)/NordVPN'
        custom_path = str()
        if path.exists(option_1_path):
            cwd_path = option_1_path
        elif path.exists(option_2_path):
            cwd_path = option_2_path
        else:
            custom_path = input("\x1b[93mIt looks like you've installed NordVPN in an uncommon folder. Would you mind telling me which folder? (e.g. D:/customfolder/nordvpn)\x1b[0m")
            while not path.exists(custom_path):
                custom_path = input("\x1b[93mI'm sorry, but this folder doesn't exist. Please double-check your input.\x1b[0m")
            while not os.path.isfile(custom_path + "/NordVPN.exe"):
                custom_path = input("\x1b[93mI'm sorry, but the NordVPN application is not located in this folder. Please double-check your input.\x1b[0m")
            cwd_path = custom_path
        print("NordVPN installation check: \33[92m\N{check mark}\33[0m")

        #check if nordvpn service is already running in the background
        check_service = "nordvpn-service.exe" in (p.name() for p in psutil.process_iter())
        if check_service is False:
            raise Exception("NordVPN service hasn't been initialized, please start this service in [task manager] --> [services] and restart your script")
        print("NordVPN service check: \33[92m\N{check mark}\33[0m")

        # start NordVPN app and disconnect from VPN service if necessary#
        print("Opening NordVPN app and disconnecting if necessary...")
        open_nord_win = subprocess.Popen(["nordvpn", "-d"],shell=True,cwd=cwd_path,stdout=DEVNULL)
        while ("NordVPN.exe" in (p.name() for p in psutil.process_iter())) == False:
            time.sleep(3)
        open_nord_win.kill()
        print("NordVPN app launched: \33[92m\N{check mark}\33[0m")
        print("#####################################")

    ##linux##
    elif opsys == "Linux":
        print("\n\33[33mYou're using Linux.\n"
              "Performing system check...\n"
              "###########################\n\33[0m")

        #check if nordvpn is installed on linux#
        check_nord_linux = check_output(["nordvpn"])
        if len(check_nord_linux) > 0:
            print("NordVPN installation check: \33[92m\N{check mark}\33[0m")
        else:
            raise Exception("NordVPN is not installed on your Linux machine.\n"
                  "Follow instructions on shorturl.at/ioDQ2 to install the NordVpn app.")

        #check if user is logged in. If not, ask for credentials and log in or use credentials from stored settings if available.#
        check_nord_linux_acc = str(check_output(["nordvpn","account"]))
        if "not logged in" in check_nord_linux_acc:
            login_needed = 1
            while login_needed == 1:
                login_message = input("\n\033[34mYou are not logged in. Please provide your credentials in the form of LOGIN/PASSWORD\n\033[0m")
                try:
                    # prefer stored credentials when available, otherwise use the typed input
                    credentials = instructions['credentials']
                except (NameError, KeyError):
                    credentials = login_message
                finally:
                    try:
                        login = credentials.split("/")[0]
                        password = credentials.split("/")[1]
                    except IndexError:
                        error_login = input("\n\033[34mYou have provided your credentials in the wrong format. Press enter and please try again.\n"
                              "Your input should look something like this: [email protected]/password\033[0m")
                    else:
                        login_needed = 0
            try:
                login_nordvpn = check_output(["nordvpn","login","-u",login,"-p",password])
            except subprocess.CalledProcessError:
                raise Exception("\nSorry,something went wrong while trying to log in\n")
            if "Welcome" in str(login_nordvpn):
                print("\n\n\033[34mLogin successful!\n\033[0m\n")
                pass
            else:
                raise Exception("\nSorry, NordVPN throws an unexpected message, namely:\n"+str(login_nordvpn))
        else:
            print("NordVPN login check: \33[92m\N{check mark}\33[0m")

        #provide opportunity to execute additional settings.#
        settings_input_message = "\n\033[34mDo you want to execute additional settings?\033[0m"
        while additional_settings_needed == 1:
            additional_settings = input(settings_input_message+
                                        "\n_________________________\n\n"
                                        "Press enter to continue\n"
                                        "Type 'help' for available options\n").strip()
            if additional_settings == "help":
                options_linux = pkg_resources.open_text(NordVPN_options, 'options_linux.txt').read().split('\n')
                for line in options_linux:
                    print(line)
                additional_settings = input("").strip()

            additional_settings = str(additional_settings).split(" ")
            if len(additional_settings[0]) > 0:
                settings_input_message = additional_settings_linux(additional_settings)
                if any(re.findall(r'done|already been executed', settings_input_message,re.IGNORECASE)):
                    additional_settings_list.append(additional_settings)
            else:
                additional_settings_needed = 0

        #however, if provided, just skip the additional settings option and execute the stored settings.#
        if 'instructions' in locals():
            try:
                if len(instructions['additional_settings'][0][0]) > 0:
                    print("Executing stored additional settings....\n")
                    for count,instruction in enumerate(instructions['additional_settings']):
                        print("Executing stored setting #"+str(count+1)+": "+str(instruction))
                        additional_settings_linux(instruction)
                else:
                    pass
            except Exception:
                pass

    else:
        raise Exception("I'm sorry, NordVPN switcher only works for Windows and Linux machines.")

    ###provide settings for VPN rotation###

    ##open available options and store these in a dict##
    areas_list = pkg_resources.open_text(NordVPN_options, 'countrylist.txt').read().split('\n')
    country_dict = {'countries':areas_list[0:60],'europe': areas_list[0:36], 'americas': areas_list[36:44],
                    'africa east india': areas_list[44:49],'asia pacific': areas_list[49:60],
                    'regions australia': areas_list[60:65],'regions canada': areas_list[65:68],
                    'regions germany': areas_list[68:70], 'regions india': areas_list[70:72],
                    'regions united states': areas_list[72:87],'special groups':areas_list[87:len(areas_list)]}

    ##provide input if needed##
    while input_needed > 0:
        if input_needed == 2:
            print("\nYou've entered a list of connection options. Checking list...\n")
            try:
                settings_servers = [area.lower() for area in area_input]
                settings_servers = ",".join(settings_servers)
            except TypeError:
                raise Exception("I expected a list here. Are you sure you've not entered a string or some other object?\n ")

        else:
            settings_servers = input("\n\033[34mI want to connect to...\n"
                                 "_________________________\n"
                                 "Type 'help' for available options\n\033[0m").strip().lower()
        #define help menu#
        if settings_servers.lower().strip() == 'help':
            #notation for specific servers differs between Windows and Linux.#
            if opsys == "Windows":
                notation_specific_server = " (e.g. Netherlands #742,Belgium #166)\n"
            else:
                notation_specific_server = " (e.g. nl742,be166)\n"

            settings_servers = input("\nOptions:\n"
                  "##########\n"
                  "* type 'quick' to choose quickconnect \n"
                  "* Single country or local region (e.g.Germany)\n"
                  "* Regions within country (e.g. regions united states')\n"
                  "* World regions (europe/americas/africa east india/asia pacific)\n"
                  "* Random multiple countries and/or local regions (e.g.France,Netherlands,Chicago)\n"
                  "* Random n countries (e.g. random countries 10)\n"
                  "* Random n countries within larger region (e.g. random countries europe 5)\n"
                  "* Random n regions in country (e.g. random regions United States 6)\n"\
                  "* Specialty group name (e.g. Dedicated IP,Double VPN)\n"
                  "* Specific list of servers"+notation_specific_server).strip().lower()

        #set base command according to running os#
        if opsys == "Windows":
            nordvpn_command = ["nordvpn", "-c"]
        if opsys == "Linux":
            nordvpn_command = ["nordvpn", "c"]

        #create sample of regions from input.#
        #1. if quick connect#
        if settings_servers == "quick":
            if input_needed == 1:
                quickconnect_check = input("\nYou are choosing for the quick connect option. Are you sure? (y/n)\n")
                if 'y' in quickconnect_check:
                    sample_countries = [""]
                    input_needed = 0
                    pass
            else:
                print("\nYou are choosing for the quick connect option.\n")

        #2. if provided specific servers. Notation differs for Windows and Linux machines, so two options are checked (first is Windows, second is Linux)#
        elif "#" in settings_servers or re.compile(r'^[a-zA-Z]+[0-9]+').search(settings_servers.split(',')[0]) is not None:
            if opsys == "Windows":
                nordvpn_command.append("-n")
            sample_countries = [area.strip() for area in settings_servers.split(',')]
            input_needed = 0
        else:
            #3. If connecting to some specific group of servers#
            if opsys == "Windows":
                nordvpn_command.append("-g")
            #3.1 if asked for random sample, pull a sample.#
            if "random" in settings_servers:
                #determine sample size (None when no number was given)#
                size_match = re.search(r'[0-9]+', settings_servers)
                samplesize = int(size_match.group()) if size_match else None
                #3.1.1 if asked for random regions within country (e.g. random regions from United States,Australia,...)#
                if "regions" in settings_servers:
                    try:
                        sample_countries = country_dict[re.sub("random", "", settings_servers).rstrip('0123456789.- ').lower().strip()]
                        input_needed = 0
                    except KeyError:
                        input("\n\nThere are no specific regions available in this country, please try again.\nPress enter to continue.\n")
                        if input_needed == 2:
                            input_needed = 1
                        continue
                    if samplesize is not None:
                        sample_countries = random.sample(sample_countries, samplesize)
                #3.1.2 if asked for random countries within larger region#
                elif any(re.findall(r'europe|americas|africa east india|asia pacific', settings_servers)):
                    larger_region = country_dict[re.sub("random|countries", "", settings_servers).rstrip('0123456789.- ').lower().strip()]
                    sample_countries = random.sample(larger_region, samplesize) if samplesize is not None else larger_region
                    input_needed = 0
                #3.1.3 if asked for random countries globally#
                else:
                    if samplesize is not None:
                        sample_countries = random.sample(country_dict['countries'], samplesize)
                    else:
                        sample_countries = country_dict['countries']
                    input_needed = 0
            #4. If asked for specific region (e.g. europe)#
            elif settings_servers in country_dict.keys():
                sample_countries = country_dict[settings_servers]
                input_needed = 0
            #5. If asked for specific countries or regions (e.g.netherlands)#
            else:
                #check for empty input first.#
                if settings_servers == "":
                    input("\n\nYou must provide some kind of input.\nPress enter to continue and then type 'help' to view the available options.\n")
                    if input_needed == 2:
                        input_needed = 1
                        continue
                else:
                    sample_countries = [area.strip() for area in settings_servers.split(',')] #take into account possible superfluous spaces#
                    approved_regions = 0
                    for region in sample_countries:
                        if region in [area.lower() for area in areas_list]:
                            approved_regions += 1
                        else:
                            input("\n\nThe region/group " + region + " is not available. Please check for spelling errors.\nPress enter to continue.\n")
                            if input_needed == 2:
                                input_needed = 1
                    if approved_regions == len(sample_countries):
                        input_needed = 0

    ##if user does not use preloaded settings##
    if "instructions" not in locals():
        #1.replace spaces with underscores on Linux#
        if opsys == "Linux":
            sample_countries = [re.sub(" ", "_", element) for element in sample_countries]
        #2.create instructions dict object#
        instructions = {'opsys':opsys,'command':nordvpn_command,'settings':sample_countries}
        if opsys == "Windows":
            instructions['cwd_path'] = cwd_path
        if opsys == "Linux":
            instructions['additional_settings'] = additional_settings_list
            if 'credentials' in locals():
                instructions['credentials'] = credentials
        #3.save the settings if requested into .txt file in project folder#
        if save == 1:
            print("\nSaving settings in project folder...\n")
            try:
                os.remove("settings_nordvpn.txt")
            except FileNotFoundError:
                pass
            with open("settings_nordvpn.txt", "w") as f:
                f.write(json.dumps(instructions))

    print("\nDone!\n")
    return instructions
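
The settings file written above is plain JSON, so a later run can restore the same instructions. A minimal sketch of that reload path (the file name matches the save branch above; the helper name is illustrative):

import json
import os

def load_stored_settings(settings_file="settings_nordvpn.txt"):
    """Return the saved instructions dict, or None if no settings were saved."""
    if not os.path.exists(settings_file):
        return None
    with open(settings_file) as f:
        return json.load(f)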
    def file_exist(self, file_name):
        # Returns an error message string when the file is missing;
        # otherwise queues the file for deletion and returns False.
        if not path.exists(file_name):
            return f"The file '{file_name}' does not exist or is outside of the script directory."
        self.files_to_delete.append(file_name)
        return False
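
Note the return convention: a truthy error message signals failure, while False means the file was queued successfully. A hypothetical caller (the `checker` instance is illustrative) would therefore check the result like this:

error = checker.file_exist("report.txt")  # 'checker' is a hypothetical instance of the class above
if error:
    print(error)          # the message explains why the file was skipped
else:
    print("queued for deletion")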
Example #41
0
    def save_results(self, output_dir):
        if self._validation_results is None:
            raise Exception(
                "No results to save. Method validate() must be run before save_results()."
            )

        for learning_point in range(
                self._validation_params['n_learning_points']):

            all_results_list = []
            all_subjects_list = []

            learning_point_dir = path.join(
                output_dir, 'learning_split-' + str(learning_point))

            for iteration in range(self._validation_params['n_iterations']):

                iteration_dir = path.join(learning_point_dir,
                                          'iteration-' + str(iteration))
                if not path.exists(iteration_dir):
                    os.makedirs(iteration_dir)
                res = self._validation_results[learning_point][iteration]
                iteration_subjects_df = pd.DataFrame({
                    'y': res['y'],
                    'y_hat': res['y_hat'],
                    'y_index': res['y_index']
                })
                iteration_subjects_df.to_csv(path.join(iteration_dir,
                                                       'subjects.tsv'),
                                             index=False,
                                             sep='\t',
                                             encoding='utf-8')
                all_subjects_list.append(iteration_subjects_df)

                iteration_results_df = pd.DataFrame(
                    {
                        'balanced_accuracy': res['evaluation']['balanced_accuracy'],
                        'auc': res['auc'],
                        'accuracy': res['evaluation']['accuracy'],
                        'sensitivity': res['evaluation']['sensitivity'],
                        'specificity': res['evaluation']['specificity'],
                        'ppv': res['evaluation']['ppv'],
                        'npv': res['evaluation']['npv'],
                        'train_balanced_accuracy': res['evaluation_train']['balanced_accuracy'],
                        'train_accuracy': res['evaluation_train']['accuracy'],
                        'train_sensitivity': res['evaluation_train']['sensitivity'],
                        'train_specificity': res['evaluation_train']['specificity'],
                        'train_ppv': res['evaluation_train']['ppv'],
                        'train_npv': res['evaluation_train']['npv']
                    },
                    index=['i'])

                iteration_results_df.to_csv(path.join(iteration_dir,
                                                      'results.tsv'),
                                            index=False,
                                            sep='\t',
                                            encoding='utf-8')

                mean_results_df = pd.DataFrame(
                    iteration_results_df.apply(np.nanmean).to_dict(),
                    columns=iteration_results_df.columns,
                    index=[0])
                mean_results_df.to_csv(path.join(iteration_dir,
                                                 'mean_results.tsv'),
                                       index=False,
                                       sep='\t',
                                       encoding='utf-8')
                all_results_list.append(mean_results_df)

            all_subjects_df = pd.concat(all_subjects_list)
            all_subjects_df.to_csv(path.join(learning_point_dir,
                                             'subjects.tsv'),
                                   index=False,
                                   sep='\t',
                                   encoding='utf-8')

            all_results_df = pd.concat(all_results_list)
            all_results_df.to_csv(path.join(learning_point_dir, 'results.tsv'),
                                  index=False,
                                  sep='\t',
                                  encoding='utf-8')

            mean_results_df = pd.DataFrame(
                all_results_df.apply(np.nanmean).to_dict(),
                columns=all_results_df.columns,
                index=[0])
            mean_results_df.to_csv(path.join(learning_point_dir,
                                             'mean_results.tsv'),
                                   index=False,
                                   sep='\t',
                                   encoding='utf-8')
Example #42
0
def load(vectors_path):
    if not path.exists(vectors_path):
        raise IOError("Can't find data directory: {}".format(vectors_path))
    vector_map = VectorMap(128)
    vector_map.load(vectors_path)
    return vector_map
Example #43
0
    def save_results(self, output_dir):
        if self._validation_results is None:
            raise Exception(
                "No results to save. Method validate() must be run before save_results()."
            )

        all_results_list = []
        all_train_subjects_list = []
        all_test_subjects_list = []

        for iteration in range(len(self._validation_results)):

            iteration_dir = path.join(output_dir,
                                      'iteration-' + str(iteration))
            if not path.exists(iteration_dir):
                os.makedirs(iteration_dir)
            res = self._validation_results[iteration]
            iteration_train_subjects_df = pd.DataFrame({
                'iteration': iteration,
                'y': res['y_train'],
                'y_hat': res['y_hat_train'],
                'subject_index': res['x_index']
            })
            iteration_train_subjects_df.to_csv(path.join(
                iteration_dir, 'train_subjects.tsv'),
                                               index=False,
                                               sep='\t',
                                               encoding='utf-8')
            all_train_subjects_list.append(iteration_train_subjects_df)

            iteration_test_subjects_df = pd.DataFrame({
                'iteration': iteration,
                'y': res['y'],
                'y_hat': res['y_hat'],
                'subject_index': res['y_index']
            })
            iteration_test_subjects_df.to_csv(path.join(
                iteration_dir, 'test_subjects.tsv'),
                                              index=False,
                                              sep='\t',
                                              encoding='utf-8')
            all_test_subjects_list.append(iteration_test_subjects_df)

            iteration_results_df = pd.DataFrame(
                {
                    'balanced_accuracy': res['evaluation']['balanced_accuracy'],
                    'auc': res['auc'],
                    'accuracy': res['evaluation']['accuracy'],
                    'sensitivity': res['evaluation']['sensitivity'],
                    'specificity': res['evaluation']['specificity'],
                    'ppv': res['evaluation']['ppv'],
                    'npv': res['evaluation']['npv'],
                    'train_balanced_accuracy': res['evaluation_train']['balanced_accuracy'],
                    'train_accuracy': res['evaluation_train']['accuracy'],
                    'train_sensitivity': res['evaluation_train']['sensitivity'],
                    'train_specificity': res['evaluation_train']['specificity'],
                    'train_ppv': res['evaluation_train']['ppv'],
                    'train_npv': res['evaluation_train']['npv']
                },
                index=['i'])
            iteration_results_df.to_csv(path.join(iteration_dir,
                                                  'results.tsv'),
                                        index=False,
                                        sep='\t',
                                        encoding='utf-8')

            # mean_results_df = pd.DataFrame(iteration_results_df.apply(np.nanmean).to_dict(),
            #                                columns=iteration_results_df.columns, index=[0, ])
            # mean_results_df.to_csv(path.join(iteration_dir, 'mean_results.tsv'),
            #                        index=False, sep='\t', encoding='utf-8')
            all_results_list.append(iteration_results_df)

        all_train_subjects_df = pd.concat(all_train_subjects_list)
        all_train_subjects_df.to_csv(path.join(output_dir,
                                               'train_subjects.tsv'),
                                     index=False,
                                     sep='\t',
                                     encoding='utf-8')

        all_test_subjects_df = pd.concat(all_test_subjects_list)
        all_test_subjects_df.to_csv(path.join(output_dir, 'test_subjects.tsv'),
                                    index=False,
                                    sep='\t',
                                    encoding='utf-8')

        all_results_df = pd.concat(all_results_list)
        all_results_df.to_csv(path.join(output_dir, 'results.tsv'),
                              index=False,
                              sep='\t',
                              encoding='utf-8')

        mean_results_df = pd.DataFrame(
            all_results_df.apply(np.nanmean).to_dict(),
            columns=all_results_df.columns,
            index=[0])
        mean_results_df.to_csv(path.join(output_dir, 'mean_results.tsv'),
                               index=False,
                               sep='\t',
                               encoding='utf-8')

        print("Mean results of the classification:")
        print("Balanced accuracy: %s" %
              (mean_results_df['balanced_accuracy'].to_string(index=False)))
        print("specificity: %s" %
              (mean_results_df['specificity'].to_string(index=False)))
        print("sensitivity: %s" %
              (mean_results_df['sensitivity'].to_string(index=False)))
        print("auc: %s" % (mean_results_df['auc'].to_string(index=False)))
Example #44
0
    def save_results(self, output_dir):
        if self._repeated_validation_results is None:
            raise Exception(
                "No results to save. Method validate() must be run before save_results()."
            )

        all_results_list = []
        all_subjects_list = []

        for iteration in range(len(self._repeated_validation_results)):

            iteration_dir = path.join(output_dir,
                                      'iteration-' + str(iteration))
            if not path.exists(iteration_dir):
                os.makedirs(iteration_dir)

            iteration_subjects_list = []
            iteration_results_list = []
            folds_dir = path.join(iteration_dir, 'folds')

            if not path.exists(folds_dir):
                os.makedirs(folds_dir)

            for i in range(len(self._repeated_validation_results[iteration])):
                fold = self._repeated_validation_results[iteration][i]
                subjects_df = pd.DataFrame({
                    'y': fold['y'],
                    'y_hat': fold['y_hat'],
                    'y_index': fold['y_index']
                })
                subjects_df.to_csv(path.join(
                    folds_dir, 'subjects_fold-' + str(i) + '.tsv'),
                                   index=False,
                                   sep='\t',
                                   encoding='utf-8')
                iteration_subjects_list.append(subjects_df)

                results_df = pd.DataFrame(
                    {
                        'balanced_accuracy': fold['evaluation']['balanced_accuracy'],
                        'accuracy': fold['evaluation']['accuracy'],
                        'train_balanced_accuracy': fold['evaluation_train']['balanced_accuracy'],
                        'train_accuracy': fold['evaluation_train']['accuracy']
                    },
                    index=['i'])
                results_df.to_csv(path.join(folds_dir,
                                            'results_fold-' + str(i) + '.tsv'),
                                  index=False,
                                  sep='\t',
                                  encoding='utf-8')
                iteration_results_list.append(results_df)

            iteration_subjects_df = pd.concat(iteration_subjects_list)
            iteration_subjects_df.to_csv(path.join(iteration_dir,
                                                   'subjects.tsv'),
                                         index=False,
                                         sep='\t',
                                         encoding='utf-8')
            all_subjects_list.append(iteration_subjects_df)

            iteration_results_df = pd.concat(iteration_results_list)
            iteration_results_df.to_csv(path.join(iteration_dir,
                                                  'results.tsv'),
                                        index=False,
                                        sep='\t',
                                        encoding='utf-8')

            mean_results_df = pd.DataFrame(
                iteration_results_df.apply(np.nanmean).to_dict(),
                columns=iteration_results_df.columns,
                index=[0])
            mean_results_df.to_csv(path.join(iteration_dir,
                                             'mean_results.tsv'),
                                   index=False,
                                   sep='\t',
                                   encoding='utf-8')
            all_results_list.append(mean_results_df)

        all_subjects_df = pd.concat(all_subjects_list)
        all_subjects_df.to_csv(path.join(output_dir, 'subjects.tsv'),
                               index=False,
                               sep='\t',
                               encoding='utf-8')

        all_results_df = pd.concat(all_results_list)
        all_results_df.to_csv(path.join(output_dir, 'results.tsv'),
                              index=False,
                              sep='\t',
                              encoding='utf-8')

        mean_results_df = pd.DataFrame(
            all_results_df.apply(np.nanmean).to_dict(),
            columns=all_results_df.columns,
            index=[0])
        mean_results_df.to_csv(path.join(output_dir, 'mean_results.tsv'),
                               index=False,
                               sep='\t',
                               encoding='utf-8')

        print("Mean results of the classification:")
        print("Balanced accuracy: %s" %
              (mean_results_df['balanced_accuracy'].to_string(index=False)))
Example #45
0
def colored_image(full_path):
  assert osp.exists(full_path), f"File {full_path} does not exist."
  return cv2.imread(full_path)
Example #46
0
    def save_results(self, output_dir):
        if self._validation_results is None:
            raise Exception(
                "No results to save. Method validate() must be run before save_results()."
            )

        subjects_folds = []
        results_folds = []
        container_dir = path.join(output_dir, 'folds')

        if not path.exists(container_dir):
            os.makedirs(container_dir)

        for i in range(len(self._validation_results)):
            res = self._validation_results[i]
            subjects_df = pd.DataFrame({
                'y': res['y'],
                'y_hat': res['y_hat'],
                'y_index': res['y_index']
            })
            subjects_df.to_csv(path.join(container_dir,
                                         'subjects_fold-' + str(i) + '.tsv'),
                               index=False,
                               sep='\t',
                               encoding='utf-8')
            subjects_folds.append(subjects_df)

            results_df = pd.DataFrame(
                {
                    'balanced_accuracy': res['evaluation']['balanced_accuracy'],
                    'auc': res['auc'],
                    'accuracy': res['evaluation']['accuracy'],
                    'sensitivity': res['evaluation']['sensitivity'],
                    'specificity': res['evaluation']['specificity'],
                    'ppv': res['evaluation']['ppv'],
                    'npv': res['evaluation']['npv'],
                    'train_balanced_accuracy': res['evaluation_train']['balanced_accuracy'],
                    'train_accuracy': res['evaluation_train']['accuracy'],
                    'train_sensitivity': res['evaluation_train']['sensitivity'],
                    'train_specificity': res['evaluation_train']['specificity'],
                    'train_ppv': res['evaluation_train']['ppv'],
                    'train_npv': res['evaluation_train']['npv']
                },
                index=['i'])

            results_df.to_csv(path.join(container_dir,
                                        'results_fold-' + str(i) + '.tsv'),
                              index=False,
                              sep='\t',
                              encoding='utf-8')
            results_folds.append(results_df)

        all_subjects = pd.concat(subjects_folds)
        all_subjects.to_csv(path.join(output_dir, 'subjects.tsv'),
                            index=False,
                            sep='\t',
                            encoding='utf-8')

        all_results = pd.concat(results_folds)
        all_results.to_csv(path.join(output_dir, 'results.tsv'),
                           index=False,
                           sep='\t',
                           encoding='utf-8')

        mean_results = pd.DataFrame(all_results.apply(np.nanmean).to_dict(),
                                    columns=all_results.columns,
                                    index=[0])
        mean_results.to_csv(path.join(output_dir, 'mean_results.tsv'),
                            index=False,
                            sep='\t',
                            encoding='utf-8')

        print("Mean results of the classification:")
        print("Balanced accuracy: %s" %
              (mean_results['balanced_accuracy'].to_string(index=False)))
        print("specificity: %s" %
              (mean_results['specificity'].to_string(index=False)))
        print("sensitivity: %s" %
              (mean_results['sensitivity'].to_string(index=False)))
        print("auc: %s" % (mean_results['auc'].to_string(index=False)))
Example #47
0
from os.path import exists

from setuptools import find_packages, setup

NAME = find_packages(exclude=['*.tests'])[0]

with open('requirements.txt') as f:
    install_requires = f.read().strip().split('\n')

if exists('README.rst'):
    with open('README.rst') as f:
        long_description = f.read()
else:
    long_description = ''

setup(
    name=NAME,
    packages=[NAME],
    install_requires=install_requires,
    author='Haiying Xu',
    author_email='*****@*****.**',
    description='A benchmarking results publish application repository',
    long_description=long_description,
    keywords='aiohttp benchmarking web_publish ',
    url='http://example.com/HelloWorld/',  # project home page, if any
    project_urls={
        'Source Code': 'https://github.com/ncar-xdev/aiohttp_app_template',
        'Documentation': 'https://ncar-xdev.github.io/aiohttp_app_template',
        'Bug Tracker':
        'https://github.com/ncar-xdev/aiohttp_app_template/issues',
    },
Example #48
0



if __name__ == '__main__':
    args = parse_args()

    with open(args.json_file) as f:
        ann_dict = json.load(f)
    print(ann_dict.keys())

    out_dir = osp.join(args.output_dir, 
                       osp.splitext(osp.basename(args.json_file))[0])
    if not osp.exists(out_dir):
        os.makedirs(out_dir, exist_ok=True)    
    
    i = 0
    for img_annot in tqdm(ann_dict['images']):
        image_name = img_annot['file_name']
        print(osp.join(args.imdir, image_name))
        if not osp.exists(osp.join(args.imdir, image_name)):
            continue
        image_id = img_annot['id']
        bboxes = [x['bbox'] for x in ann_dict['annotations'] \
                        if x['image_id'] == image_id]
        im = cv2.imread(osp.join(args.imdir, image_name))
        assert im.size > 0
        im_det = draw_detection_list(im, np.array(bboxes))
        out_path = osp.join(out_dir, image_name.replace('/', '_'))
Example #49
0
triggers = {}
tfile = "triggers.json"
tokenf = "token.txt"
ignored = []
separator = '/'

admins = [1115929317]


def is_recent(m):
    return (time() - m.date) < 60


# Check if Triggers file exists.
if exists(tfile):
    with open(tfile) as f:
        triggers = json.load(f)
else:
    # print("Triggers file not found, creating.")
    with open(tfile, 'w') as f:
        json.dump({}, f)

# Check if Token file exists, if not, create.
if exists(tokenf):
    with open(tokenf) as f:
        token = f.readline().rstrip('\n')
    # print("Token = [" + token + "]")
else:
    # print("Token File not found, creating.")
    with open(tokenf, 'w') as f:
Example #50
0
def prep_image(full_path):
  assert osp.exists(full_path), f"File {full_path} does not exist."
  return cv2.imread(full_path, cv2.IMREAD_GRAYSCALE)
Example #51
0
def save_to_db(opts, cmp_result, tad_result, reso, inputs, richA_stats, firsts,
               param_hash, launch_time, finish_time):
    if 'tmpdb' in opts and opts.tmpdb:
        # check lock
        while path.exists(path.join(opts.workdir, '__lock_db')):
            time.sleep(0.5)
        # close lock
        open(path.join(opts.workdir, '__lock_db'), 'a').close()
        # tmp file
        dbfile = opts.tmpdb
        copyfile(path.join(opts.workdir, 'trace.db'), dbfile)
    else:
        dbfile = path.join(opts.workdir, 'trace.db')
    con = lite.connect(dbfile)
    with con:
        cur = con.cursor()
        cur.execute("""SELECT name FROM sqlite_master WHERE
                       type='table' AND name='JOBs'""")
        if not cur.fetchall():
            cur.execute("""
            create table PATHs
               (Id integer primary key,
                JOBid int, Path text, Type text,
                unique (Path))""")
            cur.execute("""
            create table JOBs
               (Id integer primary key,
                Parameters text,
                Launch_time text,
                Finish_time text,
                Type text,
                Parameters_md5 text,
                unique (Parameters_md5))""")
        cur.execute("""SELECT name FROM sqlite_master WHERE
                       type='table' AND name='SEGMENT_OUTPUTs'""")
        if not cur.fetchall():
            cur.execute("""
            create table SEGMENT_OUTPUTs
               (Id integer primary key,
                JOBid int,
                Inputs text,
                TADs int,
                Compartments int,
                richA_corr real,
                EV_index int,
                EValue real,
                Chromosome text,
                Resolution int)""")
        try:
            parameters = digest_parameters(opts,
                                           get_md5=False,
                                           extra=['fasta'])
            cur.execute(
                """
            insert into JOBs
            (Id  , Parameters, Launch_time, Finish_time, Type , Parameters_md5)
            values
            (NULL,       '%s',        '%s',        '%s', 'Segment',       '%s')
            """ %
                (parameters, time.strftime("%d/%m/%Y %H:%M:%S", launch_time),
                 time.strftime("%d/%m/%Y %H:%M:%S", finish_time), param_hash))
        except lite.IntegrityError:
            pass
        jobid = get_jobid(cur)
        for ncrm, crm in enumerate(
                max(list(cmp_result.keys()), list(tad_result.keys()),
                    key=len)):
            if crm in cmp_result:
                add_path(cur, cmp_result[crm]['path_cmprt1'], 'COMPARTMENT',
                         jobid, opts.workdir)
                add_path(cur, cmp_result[crm]['path_cmprt2'], 'COMPARTMENT',
                         jobid, opts.workdir)
                add_path(cur, cmp_result[crm]['image_cmprt'], 'FIGURE', jobid,
                         opts.workdir)
                if opts.savecorr:
                    add_path(cur, cmp_result[crm]['path_cormat'],
                             'CROSS_CORR_MAT', jobid, opts.workdir)
            if crm in tad_result:
                add_path(cur, tad_result[crm]['path'], 'TAD', jobid,
                         opts.workdir)
            if opts.rich_in_A:
                add_path(cur, opts.rich_in_A, 'BED', jobid, opts.workdir)

            if crm in firsts:
                evalue = firsts[crm][0][(opts.ev_index[ncrm] -
                                         1) if opts.ev_index else 0]
                eindex = opts.ev_index[ncrm] if opts.ev_index else 1
            else:
                evalue = 'NULL'
                eindex = 'NULL'
            try:
                cur.execute("""
                insert into SEGMENT_OUTPUTs
                (Id  , JOBid, Inputs, TADs, Compartments, richA_corr, EV_index, EValue, Chromosome, Resolution)
                values
                (NULL,    %d,   '%s',   %s,           %s,         %s,       %s,     %s,       '%s',         %d)
                """ % (jobid, ','.join([str(i) for i in inputs]),
                       tad_result[crm]['num'] if crm in tad_result else 'NULL',
                       cmp_result[crm]['num'] if crm in cmp_result else 'NULL',
                       (richA_stats[crm] if crm in richA_stats
                        and richA_stats[crm] is not None else 'NULL'), eindex,
                       evalue, crm, reso))
            except lite.OperationalError:  # TODO: remove this
                print_exc()
                try:
                    cur.execute(
                        "alter table SEGMENT_OUTPUTs add column 'richA_corr' 'real'"
                    )
                except lite.OperationalError:
                    pass
                try:
                    cur.execute(
                        "alter table SEGMENT_OUTPUTs add column 'EValue' 'real'"
                    )
                except lite.OperationalError:
                    pass
                try:
                    cur.execute(
                        "alter table SEGMENT_OUTPUTs add column 'EV_index', 'int'"
                    )
                except lite.OperationalError:
                    pass
                cur.execute("""
                insert into SEGMENT_OUTPUTs
                (Id  , JOBid, Inputs, TADs, Compartments, richA_corr, EV_index, EValue, Chromosome, Resolution)
                values
                (NULL,    %d,   '%s',   %d,           %d,         %s,       %s,     %s,       '%s',         %d)
                """ % (jobid, ','.join([str(i) for i in inputs]),
                       tad_result[crm]['num'] if crm in tad_result else 0,
                       cmp_result[crm]['num'] if crm in cmp_result else 0,
                       (richA_stats[crm] if crm in richA_stats
                        and richA_stats[crm] is not None else 'NULL'), eindex,
                       evalue, crm, reso))
        print_db(cur, 'PATHs')
        print_db(cur, 'JOBs')
        print_db(cur, 'SEGMENT_OUTPUTs')
    if 'tmpdb' in opts and opts.tmpdb:
        # copy back file
        copyfile(dbfile, path.join(opts.workdir, 'trace.db'))
        remove(dbfile)
        # release lock
        remove(path.join(opts.workdir, '__lock_db'))
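
The __lock_db handling above (spin until the marker file disappears, create it, remove it when done) is a simple file lock. A minimal sketch of the same idea as a context manager, so the release cannot be forgotten (an illustration, not part of the original module):

import os
import time
from contextlib import contextmanager

@contextmanager
def db_lock(workdir, poll=0.5):
    lock = os.path.join(workdir, '__lock_db')
    while os.path.exists(lock):   # wait for other writers
        time.sleep(poll)
    open(lock, 'a').close()       # take the lock
    try:
        yield
    finally:
        os.remove(lock)           # always release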
Example #52
0
def source_code(m):
    cid = m.chat.id
    if exists(__file__):
        bot.send_document(cid, open(__file__, 'rb'))
    else:
        bot.reply_to(m, "No source file found :x")
Example #53
0
    exit(1)

if not os.path.exists(file) or not os.path.isfile(file):
    print('File,', file, 'is not a valid file')
    exit(2)

if name is None:
    print "Give a descriptive name for the data using -n '<descriptive name>'"
    exit(3)

print('Parsing file,', file)
fileName = os.path.split(file)[1]

if fileName == "spotters.dat":
    workFile = "/tmp/spotters.dat"
    if path.exists(workFile) and path.isfile(workFile) and access(
            workFile, R_OK):
        print "Attempting to cleanup work directory"
        os.system("rm /tmp/spotters.dat")
    else:
        print "No preliminary cleanup needed - continuing"

    shutil.copy(file, workFile)
    os.system("sed -i -e 's/spotterName/spottersName/g' /tmp/spotters.dat")
    os.system("sed -i -e 's/spotterAddr/spottersAddress/g' /tmp/spotters.dat")
    os.system("sed -i -e 's/spotterCity/spottersCity/g' /tmp/spotters.dat")
    os.system("sed -i -e 's/spotterPhone/spottersPhone/g' /tmp/spotters.dat")
    file = workFile

if exportFileName is None:
    exportFileName = fileName
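
The four sed calls above rewrite field names in place. The same renaming could be done portably in Python (a sketch; workFile is the path used above):

replacements = {
    'spotterName': 'spottersName',
    'spotterAddr': 'spottersAddress',
    'spotterCity': 'spottersCity',
    'spotterPhone': 'spottersPhone',
}
with open(workFile) as f:
    text = f.read()
for old, new in replacements.items():
    text = text.replace(old, new)
with open(workFile, 'w') as f:
    f.write(text)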
Example #54
0
def main():
    warnings.warn("This script is aimed to demonstrate how to convert the\n"
                  "JSON file to a single image dataset, and not to handle\n"
                  "multiple JSON files to generate a real-use dataset.")

    parser = argparse.ArgumentParser()
    parser.add_argument('json_file')
    parser.add_argument('-o', '--out', default=None)
    args = parser.parse_args()

    json_file = args.json_file

    alist = os.listdir(json_file)

    for i in range(0, len(alist)):
        path = os.path.join(json_file, alist[i])
        data = json.load(open(path))

        out_dir = osp.basename(path).replace('.', '_')
        out_dir = osp.join(osp.dirname(path), out_dir)

        if not osp.exists(out_dir):
            os.mkdir(out_dir)

        if data['imageData']:
            imageData = data['imageData']
        else:
            imagePath = os.path.join(os.path.dirname(path), data['imagePath'])
            with open(imagePath, 'rb') as f:
                imageData = f.read()
                imageData = base64.b64encode(imageData).decode('utf-8')

        img = utils.img_b64_to_arr(imageData)

        label_name_to_value = {'_background_': 0}
        for shape in data['shapes']:
            label_name = shape['label']
            if label_name in label_name_to_value:
                label_value = label_name_to_value[label_name]
            else:
                label_value = len(label_name_to_value)
                label_name_to_value[label_name] = label_value

        # label_values must be dense
        label_values, label_names = [], []
        for ln, lv in sorted(label_name_to_value.items(), key=lambda x: x[1]):
            label_values.append(lv)
            label_names.append(ln)
        assert label_values == list(range(len(label_values)))

        lbl = utils.shapes_to_label(img.shape, data['shapes'], label_name_to_value)

        captions = ['{}: {}'.format(lv, ln)
                    for ln, lv in label_name_to_value.items()]
        lbl_viz = utils.draw_label(lbl, img, captions)

        PIL.Image.fromarray(img).save(osp.join(out_dir, 'img.png'))
        utils.lblsave(osp.join(out_dir, 'label.png'), lbl)
        PIL.Image.fromarray(lbl_viz).save(osp.join(out_dir, 'label_viz.png'))

        with open(osp.join(out_dir, 'label_names.txt'), 'w') as f:
            for lbl_name in label_names:
                f.write(lbl_name + '\n')

        warnings.warn('info.yaml is being replaced by label_names.txt')
        info = dict(label_names=label_names)
        with open(osp.join(out_dir, 'info.yaml'), 'w') as f:
            yaml.safe_dump(info, f, default_flow_style=False)

        print('Saved to: %s' % out_dir)
Example #55
0
def auto_parse_schema(location):
    """
    Attempts to automatically parse the file at location (if local) and extract a schema.
    This function will only parse files that have an extension in AUTO_PARSE_EXTS and are
    smaller than AUTO_PARSE_THRESHOLD bytes.

    The function returns a rendered YAML array if the schema was parsed successfully, None
    otherwise.

    TODO: The parsing is done by Pandas (using the various pd.read_* functions), and the schema 
    is just a stringification of DataFrame.dtypes, which has several downsides (e.g. string columns
    have dtype 'object').

    Returns:
        str -- A rendered YAML containing the schema, suitable for inclusion in the template.
    """
    if not path.exists(location):
        return None
    if not check_pandas_installed():
        click.secho("Pandas is not installed, skipping parsing.", fg="yellow")
        return None

    _, ext = path.splitext(location.lower())
    if ext not in AUTO_PARSE_EXTS:
        click.secho("Unrecognized extension {}, skipping parsing.".format(ext),
                    fg="yellow")
        return None

    size = path.getsize(location)
    if size > AUTO_PARSE_THRESHOLD:
        click.secho("File is large, skipping parsing.")
        return None

    import pandas as pd

    try:
        if ext == ".csv":
            df = pd.read_csv(location, parse_dates=True, nrows=10)
        elif ext == ".tsv":
            df = pd.read_csv(location,
                             delimiter='\t',
                             parse_dates=True,
                             nrows=10)
        elif ext == ".json":
            df = pd.read_json(location)
        elif ext == ".parquet":
            df = pd.read_parquet(location)
        elif ext == ".xls" or ext == ".xlsx":
            df = pd.read_excel(location)

        schema = [
            "- {}: {} [No description]".format(field, str(kind))
            for field, kind in df.dtypes.to_dict().items()
        ]

        click.secho(
            "{} fields automatically parsed. Please check schema for accuracy."
            .format(len(schema)))
        return "\n".join(schema)

    except Exception as e:
        print(e)
        click.secho("Pandas could not parse this file, skipping parsing.")
        return None
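
Hypothetical usage (the file name and the dtypes shown are illustrative): for a small CSV the function returns one "- name: dtype [No description]" line per column, ready to paste into the template.

schema = auto_parse_schema("data/users.csv")  # "data/users.csv" is an illustrative path
if schema is not None:
    print(schema)
    # - id: int64 [No description]
    # - signup_date: object [No description]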
Example #56
0
    def train(self,
              nb_steps=30000,
              verbose=1,
              visualize=False,
              log_interval=3000):
        if self.__istrained:
            raise RuntimeError('This model has already been trained.')

        print('Training is starting, please wait.')

        # Run the training
        # Okay, now it's time to learn something!
        # We visualize the training here for show, but this slows down training quite a lot.
        # You can always safely abort the training prematurely using Ctrl + C.
        callbacks = []
        if verbose == 1:
            self.train_interval_logger = TrainIntervalLogger2(
                interval=log_interval)
            callbacks.append(self.train_interval_logger)
            verbose = 0
        elif verbose > 1:
            callbacks.append(TrainEpisodeLogger())
            verbose = 0

        hist = self.dqn.fit(self.env,
                            nb_steps=nb_steps,
                            callbacks=callbacks,
                            verbose=verbose,
                            visualize=visualize,
                            log_interval=log_interval)
        self.__istrained = True

        if self.train_interval_logger is not None:
            # Visualize the training progress
            interval = self.train_interval_logger.records['interval']
            episode_reward = self.train_interval_logger.records[
                'episode_reward']
            mean_q = self.train_interval_logger.records['mean_q']
            if len(interval) > len(mean_q):
                mean_q = np.pad(mean_q, [len(interval) - len(mean_q), 0],
                                "constant")
            plt.figure()
            plt.plot(interval, episode_reward, marker='.', label='Reward')
            plt.plot(interval, mean_q, marker='.', label='Q value')
            plt.legend(loc='best', fontsize=10)
            plt.grid()
            plt.xlabel('interval')
            plt.ylabel('score')
            plt.title('Training progress')
            plt.xticks(
                np.arange(min(interval),
                          max(interval) + 1,
                          (max(interval) - min(interval)) // 7))
            plt.show()

        # Save the weights
        if not exists(self.__class__.weightdir):
            try:
                mkdir(self.__class__.weightdir)
            except Exception:
                print('An error occurred while creating the weights folder.')
                print('Unexpected error:', exc_info()[0])
                raise
        try:
            # After training is done, we save the final weights.
            self.dqn.save_weights(self.weightfile, overwrite=True)
        except Exception:
            print('An error occurred while saving the weights.')
            print('Unexpected error:', exc_info()[0])
            raise

        return hist
Example #57
0
def plot_interpolate_volspread(stock_index,ticker,date,**kwargs):    
    """Merge all buy volumes, all sell volumes and interpolate results in order
       to be able to define the buy/sell volume spread. 
       Returns are moving-averaged.
    """
    vol_signal, tstamps = split_buy_sell_vols(stock_index,ticker,date)
    
    a = vol_signal.loc[vol_signal['Signal']==3][['Time','Vol']]
    vol_sbuy = pd.DataFrame({'Time': a['Time'].values, \
                            'Vol Strong Buy': a['Vol'].values})
    a = vol_signal.loc[vol_signal['Signal']==2][['Time','Vol']]                        
    vol_wbuy = pd.DataFrame({'Time': a['Time'].values, \
                            'Vol Weak Buy': a['Vol'].values})
    a = vol_signal.loc[vol_signal['Signal']==1][['Time','Vol']]                           
    vol_mspbuy = pd.DataFrame({'Time': a['Time'].values, \
                            'Vol Mid Spread Buy': a['Vol'].values})
    a = vol_signal.loc[vol_signal['Signal']==-1][['Time','Vol']]                             
    vol_mspsell = pd.DataFrame({'Time': a['Time'].values, \
                            'Vol Mid Spread Sell': a['Vol'].values})
    a = vol_signal.loc[vol_signal['Signal']==-2][['Time','Vol']]                            
    vol_wsell = pd.DataFrame({'Time': a['Time'].values, \
                            'Vol Weak Sell': a['Vol'].values})
    a = vol_signal.loc[vol_signal['Signal']==-3][['Time','Vol']]                            
    vol_ssell = pd.DataFrame({'Time': a['Time'].values, \
                            'Vol Strong Sell': a['Vol'].values})
    a = vol_signal.loc[vol_signal['Signal'].isin([3,2,1])][['Time','Vol']]
    vol_buy = pd.DataFrame({'Time': a['Time'].values, \
                            'Vol Buy': a['Vol'].values})
    a = vol_signal.loc[vol_signal['Signal'].isin([-3,-2,-1])][['Time','Vol']]
    vol_sell = pd.DataFrame({'Time': a['Time'].values, \
                            'Vol Sell': a['Vol'].values})
    # Interpolate buy and sell volumes to have trading events at common times
    vol_buy_interp = vol_interpolate(vol_buy,vol_sell,'buy').fillna(0)
    vol_sell_interp = vol_interpolate(vol_sell,vol_buy,'sell').fillna(0)
    # Volume spread
    Vol_spread = vol_buy_interp['Vol_buy'] - vol_sell_interp['Vol_sell']    
    vol_buy_interp['Vol_spread'] = Vol_spread 
    # Set the format for Time: tseries (default) or timestamps  
    intt2stamp = lambda df, ts: pd.Series(ts.loc[ts['Unnamed: 0'].isin(df['Time'].\
                                astype(int))]['Time'].values)
    if kwargs.get('tformat') == 'tstamps':
        vol_buy_interp['Time'] = tstamps['Time']
        vol_sell_interp['Time'] = tstamps['Time']
        vol_wbuy['Time'] = intt2stamp(vol_wbuy,tstamps)
        vol_wsell['Time'] = intt2stamp(vol_wsell,tstamps)
        vol_sbuy['Time'] = intt2stamp(vol_sbuy,tstamps)
        vol_ssell['Time'] = intt2stamp(vol_ssell,tstamps)        
        vol_mspbuy['Time'] = intt2stamp(vol_mspbuy,tstamps)
        vol_mspsell['Time'] = intt2stamp(vol_mspsell,tstamps)

    ax=vol_wbuy.plot(x='Time',y='Vol Weak Buy',marker='o',linestyle='dashed',\
            color=(0.4,0.7,1),title='Buy and Sell Volumes for '+ ticker,legend=False)
    vol_wsell.plot(ax=ax,x='Time',y='Vol Weak Sell',marker='o',\
            linestyle='dashed',color=(1,0.6,0.6),legend=False)    
    if not vol_sbuy.empty:
        vol_sbuy.plot(ax=ax,x='Time',y='Vol Strong Buy',style='ob',legend=False)
    if not vol_ssell.empty:    
        vol_ssell.plot(ax=ax,x='Time',y='Vol Strong Sell',style='or',legend=False)
    if not vol_mspbuy.empty:
        vol_mspbuy.plot(ax=ax,x='Time',y='Vol Mid Spread Buy',style='og',legend=False)  
    if not vol_mspsell.empty:
        vol_mspsell.plot(ax=ax,x='Time',y='Vol Mid Spread Sell',style='og',legend=False)  
                                        
    dir_save = img_dir +'vol_evolution/'+ date +'/'
    if not exists(dir_save):
        mkdir(dir_save)
    fig = ax.get_figure() 
    fig.set_size_inches(8, 6)
    fig.savefig(dir_save+ticker+'.png',bbox_inches='tight',dpi=100)
    plt.close(fig)
Example #58
0
def may_make_dir(path):
    if path in [None, '']:
        return
    if not osp.exists(path):
        os.makedirs(path)
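
Usage is straightforward; because it delegates to os.makedirs, missing intermediate directories are created as well:

may_make_dir('results/2024/plots')  # also creates results/ and results/2024/
may_make_dir('')                    # no-op for empty paths
may_make_dir(None)                  # no-op for None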
Example #59
0
def split_buy_sell_vols(stock_index, ticker, date):
    """
    Build buy/sell volume signals and stock returns.
    Buy/sell volumes are divided into the following categories:
    -----------------------------------------------------------
        Strong Buy:       3
        Buy:              2
        Mid Spread Buy:   1
        Mid Spread Sell: -1
        Sell:            -2
        Strong Sell:     -3
    """
    input_dir = data_dir + stock_index + '/' + date + '/'
    df = pd.read_csv(input_dir + 'order_book/' + ticker +'.csv')
    # Running record of classified volume: one row per trade event
    vol_signal = pd.DataFrame(columns=['Time', 'Signal', 'Vol'])
    vol_buy = 0
    vol_sell = 0
    # Classify each trade by where it printed relative to the quotes
    for k in range(df.shape[0]):
        bid, ask = df['Bid'][k], df['Ask'][k]
        last_price = df['Last Price'][k]
        last_size = df['Last Size'][k]
        t = df['Unnamed: 0'][k]
        if last_price == bid:
            # Printed at the bid: strong sell (-3)
            vol_sell += last_size
            vol_signal.loc[vol_signal.shape[0]] = [t, -3, vol_sell]
        elif last_price == ask:
            # Printed at the ask: strong buy (3)
            vol_buy += last_size
            vol_signal.loc[vol_signal.shape[0]] = [t, 3, vol_buy]
        elif abs(last_price - bid) < abs(last_price - ask):
            # Nearer the bid: sell (-2)
            vol_sell += last_size
            vol_signal.loc[vol_signal.shape[0]] = [t, -2, vol_sell]
        elif abs(last_price - bid) > abs(last_price - ask):
            # Nearer the ask: buy (2)
            vol_buy += last_size
            vol_signal.loc[vol_signal.shape[0]] = [t, 2, vol_buy]
        else:
            # At the middle of the bid-ask spread (or no bid/no ask):
            # randomly assign a buy (1) or a sell (-1) signal
            if np.random.randint(2) == 1:
                vol_buy += last_size
                vol_signal.loc[vol_signal.shape[0]] = [t, 1, vol_buy]
            else:
                vol_sell += last_size
                vol_signal.loc[vol_signal.shape[0]] = [t, -1, vol_sell]
    # Normalize volumes with respect to float shares
    fundm = input_dir + 'fundm_' + date + '.csv'
    if not exists(fundm):
        # Alternative source: sf.request_fundamentals(stock_index)
        sf.yahoof_key_statistics(stock_index)
    # Float is quoted in billions of shares; convert to an absolute share count
    Float = 1e9 * pd.read_csv(fundm, index_col='Ticker').loc[ticker]['Float']
    # Express results in basis points (*10^4)
    bp = 1e4
    vol_signal['Vol'] = bp * vol_signal['Vol'] / Float
    # Time stamps of trading events
    tstamps = pd.concat([df['Unnamed: 0'], pd.to_datetime(df['Time']).dt.time], axis=1)
    return vol_signal, tstamps
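
A quick way to sanity-check the tick rule used in the loop above is to run it on a toy tape of (bid, ask, last price) quotes. The sketch below re-states the classification branch for illustration only; it does not call split_buy_sell_vols itself, since that requires the on-disk order-book and fundamentals files:

import numpy as np

# Toy tape of (bid, ask, last_price) quotes
tape = [(10.00, 10.02, 10.02),   # printed at the ask ->  3 (Strong Buy)
        (10.00, 10.02, 10.00),   # printed at the bid -> -3 (Strong Sell)
        (10.00, 10.04, 10.03)]   # nearer the ask     ->  2 (Buy)

def classify(bid, ask, last_price):
    # Same tick rule as the loop in split_buy_sell_vols
    if last_price == bid:
        return -3
    if last_price == ask:
        return 3
    if abs(last_price - bid) < abs(last_price - ask):
        return -2
    if abs(last_price - bid) > abs(last_price - ask):
        return 2
    return 1 if np.random.randint(2) == 1 else -1  # mid-spread coin flip

print([classify(*q) for q in tape])  # [3, -3, 2]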
Example #60
0
    new_folder = join(folder, file_title)
    new_path = join(new_folder, new_filename)
    makedirs(new_folder, exist_ok=True)
    copyfile(path, new_path)

    with open(
            join(new_folder, '.presentation'), mode='w', encoding="utf8") as f:
        dump({'Title': title, 'Author': author}, f)

    shell_call('git add "{}"'.format(new_folder))

    return title, author
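
# The '.presentation' file written above is plain JSON, so other tooling
# (for instance a README generator) can read the metadata back. A hedged
# sketch, assuming dump/load come from the json module:
#
#     from json import load
#     with open(join(new_folder, '.presentation'), encoding='utf8') as f:
#         meta = load(f)   # {'Title': ..., 'Author': ...}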


if __name__ == '__main__':
    if not isdir('_tools'):  # isdir already returns False for missing paths
        print("Run this from the CppCon{} root.".format(CPPCON_YEAR))
        exit(1)

    TITLE = None
    AUTHOR = None
    if len(sys.argv) == 2 and sys.argv[1]:
        TITLE, AUTHOR = add_presentation(sys.argv[1])

    with open('README.md', mode='w', encoding="utf8") as readme:
        make_readme(readme)

    shell_call('git add README.md')
    if TITLE and AUTHOR:
        shell_call('git commit -v -m "Add {} by {}" -e'.format(TITLE, AUTHOR))
    else: