Code Example #1
import os
import re
import subprocess
import sys


def main():
    cur_dir = os.path.dirname(__file__)
    os.chdir(os.path.join(cur_dir, ".."))
    modules = sys.argv[1:]

    if not modules:
        modules = ['django_evolution']

    p = subprocess.Popen(['pyflakes'] + modules,
                         stderr=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         close_fds=True)

    contents = p.stdout.readlines()
    # Read in the exclusions file
    exclusions = {}
    fp = open(os.path.join(cur_dir, "pyflakes.exclude"), "r")

    for line in fp.readlines():
        if not line.startswith("#"):
            exclusions[line.rstrip()] = 1

    fp.close()

    # Now filter them out
    for line in contents:
        line = line.rstrip()
        test_line = re.sub(r':[0-9]+:', r':*:', line, 1)
        test_line = re.sub(r'line [0-9]+', r'line *', test_line)

        if test_line not in exclusions:
            print line
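The exclusion check above works because both the pyflakes output and the
exclusions file are normalized so that line numbers never have to match. A
minimal sketch of that normalization (the sample warning text is hypothetical):

import re

line = "django_evolution/models.py:42: 'os' imported but unused"
test_line = re.sub(r':[0-9]+:', r':*:', line, 1)
test_line = re.sub(r'line [0-9]+', r'line *', test_line)
print(test_line)  # django_evolution/models.py:*: 'os' imported but unused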
Code Example #2
def main(name):
  try:
    from python_qt_binding.QtGui import QApplication
  except ImportError:
    print >> sys.stderr, "please install the 'python_qt_binding' package!"
    sys.exit(-1)

  masteruri = init_cfg_path()
  parser = init_arg_parser()
  args = rospy.myargv(argv=sys.argv)
  parsed_args = parser.parse_args(args[1:])
  # Initialize Qt
  global app
  app = QApplication(sys.argv)

  # decide to show main or echo dialog
  global main_form
  if parsed_args.echo:
    main_form = init_echo_dialog(name, masteruri, parsed_args.echo[0], parsed_args.echo[1], parsed_args.hz)
  else:
    main_form = init_main_window(name, masteruri, parsed_args.file)

  # resize and show the qt window
  if not rospy.is_shutdown():
    os.chdir(PACKAGE_DIR) # change directory so images referenced in descriptions can be found
    main_form.resize(1024, 720)
    screen_size = QApplication.desktop().availableGeometry()
    if main_form.size().width() >= screen_size.width() or main_form.size().height() >= screen_size.height()-24:
      main_form.showMaximized()
    else:
      main_form.show()
    exit_code = -1
    rospy.on_shutdown(finish)
    exit_code = app.exec_()
Code Example #3
File: test_examples.py Project: mitocw/latex2edx
    def test_merge(self):
        testdir = path(l2emod.__file__).parent / 'testtex'
        with make_temp_directory() as tmdir:
            fn = testdir / 'example1.tex'
            print "file %s" % fn
            nfn = '%s/%s' % (tmdir, fn.basename())
            os.system('cp %s/* %s' % (testdir, tmdir))
            os.chdir(tmdir)
            l2e = latex2edx(nfn, output_dir=tmdir)
            l2e.convert()

            fn = testdir / 'example2.tex'
            print "file %s" % fn
            nfn = '%s/%s' % (tmdir, fn.basename())
            l2e = latex2edx(nfn, output_dir=tmdir, do_merge=True)
            l2e.convert()

            cfn = path(tmdir) / 'course/2013_Fall.xml'
            self.assertTrue(os.path.exists(cfn))

            self.assertIn('<chapter url_name="Unit_1"', open(cfn).read())
            self.assertIn('<chapter url_name="Unit_2"', open(cfn).read())

            cfn = path(tmdir) / 'chapter/Unit_1.xml'
            self.assertTrue(os.path.exists(cfn))

            cfn = path(tmdir) / 'chapter/Unit_2.xml'
            self.assertTrue(os.path.exists(cfn))
Code Example #4
File: test_bundler.py Project: Javex/PyGitUp
def test_bundler():
    """ Run bundler integration """
    shell = platform.system() == 'Windows'

    if os.environ.get('TRAVIS', False):
        raise SkipTest('Skip this test on Travis CI :(')

    # Helper methods
    def is_installed(prog):
        dev_null = open(os.devnull, 'wb')
        try:
            return_value = subprocess.call([prog, '--version'], shell=shell,
                                           stdout=dev_null, stderr=dev_null)
            return return_value == 0
        except OSError:
            return False

    def get_output(cmd):
        return str(subprocess.check_output(cmd, shell=shell))

    # Check for ruby and bundler
    if not (is_installed('ruby') and is_installed('gem')
            and 'bundler' in get_output(['gem', 'list'])):

        # Ruby not installed, skip test
        raise SkipTest('Ruby not installed, skipped Bundler integration test')

    os.chdir(repo_path)

    from PyGitUp.gitup import GitUp
    gitup = GitUp(testing=True)
    gitup.run()
Code Example #5
File: update-docs.py Project: m4rw3r/rust-conv
def init_doc_branch():
    msg("Initialising %s branch" % DOC_TARGET_BRANCH)

    dir = os.getcwdu()
    msg_trace('dir = %r' % dir)

    tmp = tempfile.mkdtemp(prefix=TEMP_CHECKOUT_PREFIX)
    msg_trace('tmp = %r' % tmp)

    try:
        msg("Cloning into a temporary directory...")
        sh('git init -q "%s"' % tmp)
        msg_trace('os.chdir(%r)' % tmp)
        os.chdir(tmp)
        sh('git checkout -q --orphan "%s"' % DOC_TARGET_BRANCH)
        sh('git commit -qm "Initial commit." --allow-empty')
        sh('git remote add origin "%s"' % dir)
        sh('git push -q origin gh-pages')

    finally:
        msg('Cleaning up...')
        msg_trace('os.chdir(%r)' % dir)
        os.chdir(dir)
        msg_trace('shutil.rmtree(%r)' % tmp)
        really_rmtree(tmp)

    msg('%s is ready.  Continuing.' % DOC_TARGET_BRANCH)
Code Example #6
File: Git.py Project: njqaaa/deploy
def mergeToBranch(branchName,branchType,branchDesc):
    '''
    Merge the branch into master.
    '''
    shellPath = settings.SHELL_PATH
    os.chdir(shellPath)
    try:
        if_merge = VersionList.objects.filter(branch_name = branchName)[0]
        message = "This branch has already been merged into master; please create a new branch from the active project"
    except IndexError:
        if_merge = 'no_merge'
        opResult = os.popen(shellPath+"git-merge-branch.sh %s %s %s %s" % (branchName,branchType,branchType,branchDesc))
        outputs = "<br/>".join(opResult.readlines()) # log output
        preg = outputs.find('have conflict')
        no_branch = outputs.find('no this branch')
        error_repo = outputs.find('repo is error')
        if_push_ok = outputs.find('FAIL')
        if_pass = outputs.find('PASS')
        if error_repo >= 0:
            message = str(branchType)+" repository error; please contact an administrator"
        elif no_branch >= 0:
            message = "This branch does not exist; please double-check the branch name"
        elif preg >= 0:
            message = "Conflict while rebasing onto master; please resolve it. Branch: "+str(branchName)
        elif if_push_ok >= 0:
            message = "Git shows no merge info for this branch and no conflict; please verify the branch name"
        elif if_pass >= 0:
            message = "Merge succeeded"
        else:
            message = "no status"

    return message
Code Example #7
File: test_examples.py Project: mitocw/latex2edx
    def test_example1(self):
        testdir = path(l2emod.__file__).parent / 'testtex'
        fn = testdir / 'example1.tex'
        print "file %s" % fn
        with make_temp_directory() as tmdir:
            nfn = '%s/%s' % (tmdir, fn.basename())
            os.system('cp %s/* %s' % (testdir, tmdir))
            os.chdir(tmdir)
            l2e = latex2edx(nfn, output_dir=tmdir)
            l2e.convert()
            xbfn = nfn[:-4] + '.xbundle'
            self.assertTrue(os.path.exists(xbfn))
            # xb = open(xbfn).read()

            # self.assertIn('<chapter display_name="Unit 1" start="2013-11-22" url_name="Unit_1">', xb)
            xml = etree.parse(xbfn).getroot()
            chapter = xml.find('.//chapter')
            self.assertTrue(chapter.get('display_name') == 'Unit 1')
            self.assertTrue(chapter.get('start') == '2013-11-22')
            self.assertTrue(chapter.get('url_name') == 'Unit_1')

            cfn = path(tmdir) / 'course/2013_Fall.xml'
            self.assertTrue(os.path.exists(cfn))

            cfn = path(tmdir) / 'chapter/Unit_1.xml'
            self.assertTrue(os.path.exists(cfn))

            # self.assertIn('<sequential display_name="Introduction" due="2013-11-22" url_name="Introduction"', open(cfn).read())
            xml = etree.parse(cfn).getroot()
            seq = xml.find('.//sequential')
            self.assertTrue(seq.get('display_name') == 'Introduction')
            self.assertTrue(seq.get('due') == '2013-11-22')
            self.assertTrue(seq.get('url_name') == 'Introduction')

            self.assertIn('<problem url_name="p1"/>', open(cfn).read())
Code Example #8
def buildComponent(component, componentDir=None):
    """ Build the component. Return a pair of paths
        (pathToBinary, pathToXPTFile)"""
    logger = build_util.getLogger('build_components')
    # Save current working directory to set it back later
    prevDir = os.getcwd()
    # Component Directory
    if componentDir is None:
        componentDir = os.path.join(Settings.prefs.src_dir, "components", component)

    os.chdir(componentDir)
    logger.info("Making build and bin dirs for component %s"%component)
    buildDir = os.path.join(componentDir, "build")
    binDir = os.path.join(componentDir, "bin")
    
    for dir in [buildDir, binDir]:
        try:
            os.mkdir(dir)
        except OSError, err:
            if err.errno == errno.EEXIST:
                logger.warning("Couldn't make %s because it exists."%dir)
                logger.warning("Deleting %s"%dir)
                shutil.rmtree(dir)
                logger.warning("Trying to make %s again"%dir)
                os.mkdir(dir)
            else:
                raise
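For comparison, a minimal Python 3 sketch of the same recreate-the-directory
idea, assuming the same build/bin layout (directory names are illustrative):

import shutil
from pathlib import Path

for d in (Path("build"), Path("bin")):
    if d.exists():
        shutil.rmtree(d)  # drop any stale contents, as the code above does
    d.mkdir()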
Code Example #9
File: rundaca2.py Project: VKlayd/cppcheck
def daca2(foldernum):
    folders = '0123456789abcdefghijklmnopqrstuvwxyz'
    folder = folders[foldernum % len(folders)]

    print('Daca2 folder=' + folder)

    os.chdir(os.path.expanduser('~/cppcheck'))
    subprocess.call(['git', 'pull'])
    p = subprocess.Popen(['git', 'show', '--format=%h'],
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    comm = p.communicate()
    rev = comm[0]
    rev = rev[:rev.find('\n')]

    # compile cppcheck
    subprocess.call(['nice', 'make', 'SRCDIR=build', 'CFGDIR=' + os.path.expanduser('~/cppcheck/cfg'), 'CXXFLAGS=-g -O2', 'CPPFLAGS=-DMAXTIME=600'])
    subprocess.call(['cp', 'cppcheck', os.path.expanduser('~/daca2/cppcheck-O2')])

    # run cppcheck
    subprocess.call(['rm', '-rf', os.path.expanduser('~/daca2/' + folder)])
    subprocess.call(['nice', '--adjustment=19', 'python', os.path.expanduser('~/cppcheck/tools/daca2.py'), folder, '--rev=' + rev])
    upload(os.path.expanduser('~/daca2/' + folder + '/results.txt'), 'evidente/results-' + folder + '.txt')
    subprocess.call(['rm', '-rf', os.path.expanduser('~/daca2/lib' + folder)])
    subprocess.call(['nice', '--adjustment=19', 'python', os.path.expanduser('~/cppcheck/tools/daca2.py'), 'lib' + folder, '--rev=' + rev])
    upload(os.path.expanduser('~/daca2/lib' + folder + '/results.txt'), 'evidente/results-lib' + folder + '.txt')

    # run cppcheck addons
    subprocess.call(['rm', '-rf', os.path.expanduser('~/daca2/' + folder)])
    subprocess.call(['nice', '--adjustment=19', 'python', os.path.expanduser('~/cppcheck/tools/daca2-addons.py'), folder, '--rev=' + rev])
    upload(os.path.expanduser('~/daca2/' + folder + '/results.txt'), 'evidente/addons-' + folder + '.txt')
    subprocess.call(['rm', '-rf', os.path.expanduser('~/daca2/lib' + folder)])
    subprocess.call(['nice', '--adjustment=19', 'python', os.path.expanduser('~/cppcheck/tools/daca2-addons.py'), 'lib' + folder, '--rev=' + rev])
    upload(os.path.expanduser('~/daca2/lib' + folder + '/results.txt'), 'evidente/addons-lib' + folder + '.txt')
Code Example #10
File: upgrade_master.py Project: Geeglee/buildbot
@defer.inlineCallbacks  # the body yields and uses defer.returnValue
def upgradeMaster(config, _noMonkey=False):
    if not _noMonkey:  # pragma: no cover
        monkeypatches.patch_all()

    if not base.checkBasedir(config):
        defer.returnValue(1)
        return

    os.chdir(config['basedir'])

    try:
        configFile = base.getConfigFileFromTac(config['basedir'])
    except (SyntaxError, ImportError) as e:
        print("Unable to load 'buildbot.tac' from '%s':" % config['basedir'])
        print(e)

        defer.returnValue(1)
        return
    master_cfg = base.loadConfig(config, configFile)
    if not master_cfg:
        defer.returnValue(1)
        return

    upgradeFiles(config)
    yield upgradeDatabase(config, master_cfg)

    if not config['quiet']:
        print("upgrade complete")

    defer.returnValue(0)
Code Example #11
File: util.py Project: BAXTER001/atom-shell
@contextlib.contextmanager  # restores the original cwd even if the block raises
def scoped_cwd(path):
  cwd = os.getcwd()
  os.chdir(path)
  try:
    yield
  finally:
    os.chdir(cwd)
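A minimal usage sketch for this context manager (the directory is hypothetical):

with scoped_cwd('/tmp/build'):
    print(os.getcwd())  # '/tmp/build' while inside the block
# the previous working directory is restored here, even if the block raised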
Code Example #12
File: create-launcher.py Project: drmrboy/gpodder
def build_launcher(out_path, icon_path, file_desc, product_name, product_version,
                   company_name, entry_point, is_gui):

    src_ico = os.path.abspath(icon_path)
    target = os.path.abspath(out_path)

    file_version = product_version

    dir_ = os.getcwd()
    temp = tempfile.mkdtemp()
    try:
        os.chdir(temp)
        with open("launcher.c", "w") as h:
            h.write(get_launcher_code(entry_point))
        shutil.copyfile(src_ico, "launcher.ico")
        with open("launcher.rc", "w") as h:
            h.write(get_resource_code(
                os.path.basename(target), file_version, file_desc,
                "launcher.ico", product_name, product_version, company_name))

        build_resource("launcher.rc", "launcher.res")
        build_exe("launcher.c", "launcher.res", is_gui, target)
    finally:
        os.chdir(dir_)
        shutil.rmtree(temp)
Code Example #13
def download_file(url, to_directory='.'):
    cwd = os.getcwd()
    os.chdir(to_directory)

    try:
        file_name = url.split('/')[-1]
        u = urllib2.urlopen(url)
        f = open(file_name, 'wb')
        meta = u.info()
        etag = meta.getheaders("etag")[0]
        file_size = int(meta.getheaders("Content-Length")[0])
        print "Downloading: %s Bytes: %s" % (file_name, file_size)

        file_size_dl = 0
        block_sz = 8192
        while True:
            buffer = u.read(block_sz)
            if not buffer:
                break
            file_size_dl += len(buffer)
            f.write(buffer)
            status = r"%10d  [%3.2f%%]" % (file_size_dl, file_size_dl * 100. / file_size)
            status = status + chr(8)*(len(status)+1)
            print status,
        f.close()
        return etag
    finally:
        os.chdir(cwd)
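A usage sketch, assuming a reachable URL (both values are illustrative):

etag = download_file('http://example.com/archive.tar.gz', to_directory='/tmp')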
Code Example #14
    def test_no_setup_cfg(self):
        # makes sure easy_install as a command (main)
        # doesn't use a setup.cfg file that is located
        # in the current working directory
        dir = tempfile.mkdtemp()
        setup_cfg = open(os.path.join(dir, 'setup.cfg'), 'w')
        setup_cfg.write('[easy_install]\nfind_links = http://example.com')
        setup_cfg.close()
        setup_py = open(os.path.join(dir, 'setup.py'), 'w')
        setup_py.write(SETUP_PY)
        setup_py.close()

        from setuptools.dist import Distribution

        def _parse_command_line(self):
            msg = 'Error: a local setup.cfg was used'
            opts = self.command_options
            if 'easy_install' in opts:
                assert 'find_links' not in opts['easy_install'], msg
            return self._old_parse_command_line()

        Distribution._old_parse_command_line = Distribution.parse_command_line
        Distribution.parse_command_line = _parse_command_line

        old_wd = os.getcwd()
        try:
            os.chdir(dir)
            reset_setup_stop_context(
                lambda: self.assertRaises(SystemExit, main, [])
            )
        finally:
            os.chdir(old_wd)
            shutil.rmtree(dir)
            Distribution.parse_command_line = Distribution._old_parse_command_line
Code Example #15
File: test_readcgcef.py Project: cephurs/readcgcef
    def setUp(self):
        self._cwd = os.getcwd()
        dirname = os.path.dirname(__file__)
        if dirname:
            os.chdir(dirname)
        self.run_cmd(['make', 'clean'])
        self.run_cmd(['make'])
Code Example #16
File: enkf.py Project: ouinformatics/cybercomq
def initEnKFrun(callback=None,basedir=None,site=None,siteparam=None):
    if not site:
        raise ValueError("site is a required parameter")
    if siteparam:
        param = ast.literal_eval(siteparam)
    else:
        raise ValueError("Parameters are required. Please submit parameters with the task submission")
    if not basedir:
        raise ValueError("basedir is required")
    #Create working directory
    newDir = basedir + "celery_data/" + str(initEnKFrun.request.id)
    check_call(["mkdir",newDir])
    os.chdir(newDir)
    #copy matlab code to working directory
    codeDir = basedir + 'enfk_matlab/'
    for file in glob.glob(codeDir + '*'): 
        shutil.copy(file, newDir) 
    #check_call(["cp","-r",codeDir + '*',newDir])
    #set initial parameter files
    setup_param(newDir,param)
    if callback:
        result=subtask(callback).delay(task_id=str(initEnKFrun.request.id),wkdir=newDir)
        return {'task_id':result.task_id,'task_name':result.task_name}
    else:
        return newDir
Code Example #17
File: commands.py Project: 1901/sublime_sync
    def make_new_version_message(self, path):
        """Make a new version message for the repo at the given path."""

        try:
            cwd = os.getcwd()
            os.chdir(path)

            version = self.get_current_tag()

            if version[0] is None:
                return

            messages_path = os.path.join(path, 'messages.json')
            message_path = self.rewrite_messages_json(messages_path, version)

            if os.path.exists(message_path):
                os.remove(message_path)

            with open(message_path, mode='w', encoding='utf-8') as f:
                header = '{} {}'.format(
                    os.path.basename(path),
                    os.path.splitext(os.path.basename(message_path))[0])
                f.write('{}\n{}\n'.format(header, '-' * (len(header) + 1)))
                f.write(self.get_commit_messages_since(version))

            self.window.run_command('open_file', args={'file': message_path})

        except Exception:
            import traceback
            traceback.print_exc()
        finally:
            os.chdir(cwd)
Code Example #18
File: common.py Project: adamchainz/hashdist
@contextmanager
def working_directory(path):
    old = os.getcwd()
    try:
        os.chdir(path)
        yield
    finally:
        os.chdir(old)
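Since Python 3.11 the standard library ships an equivalent helper,
contextlib.chdir, so on recent interpreters the same pattern reduces to:

import contextlib

with contextlib.chdir('/some/path'):  # hypothetical path
    ...  # the cwd is '/some/path' here and is restored on exit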
Code Example #19
File: atlas.py Project: nudded/easybuild
    def configure(self):

        # configure for 64-bit build
        self.updatecfg('configopts', "-b 64")

        if self.getcfg('ignorethrottling'):
            # ignore CPU throttling check
            # this is not recommended, it will disturb the measurements done by ATLAS
            # used for the EasyBuild demo, to avoid requiring root privileges
            self.updatecfg('configopts', '-Si cputhrchk 0')

        # if LAPACK is found, instruct ATLAS to provide a full LAPACK library
        # ATLAS only provides a few LAPACK routines natively
        if self.getcfg('full_lapack'):
            lapack = get_software_root('LAPACK')
            if lapack:
                self.updatecfg('configopts', ' --with-netlib-lapack=%s/lib/liblapack.a' % lapack)
            else:
                self.log.error("netlib's LAPACK library not available,"\
                               " required to build ATLAS with a full LAPACK library.")

        # enable building of shared libraries (requires -fPIC)
        if self.getcfg('sharedlibs') or self.toolkit().opts['pic']:
            self.log.debug("Enabling -fPIC because we're building shared ATLAS libs, or just because.")
            self.updatecfg('configopts', '-Fa alg -fPIC')

        # ATLAS only wants to be configured/built in a separate dir
        try:
            objdir = "obj"
            os.makedirs(objdir)
            os.chdir(objdir)
        except OSError, err:
            self.log.error("Failed to create obj directory to build in: %s" % err)
Code Example #20
    def do_deploy():
        def _run_ansible():
            subprocess.check_call(['ansible-playbook', 'masters.yml'],
                                  cwd='result/{}'.format(grid_name))
        parent_deployment._status = 'masters_provision_deploying'
        provision_deployment._status = 'masters_deploying'
        provision_deployment.save()
        parent_deployment.save()
        provision_generator = provision_generators[grid.type][
            grid.provider](grid_name, **kwargs)
        provision_generator.generate_all(
            grid_name, infrastructure_deployment._accessip)
        try:
            _run_ansible()
        except Exception:
            provision_deployment._status = 'masters_deploy_failed'
            parent_deployment._status = 'masters_provision_deploy_failed'
        else:
            provision_deployment._status = 'masters_deployed'
            parent_deployment._status = 'masters_provision_finished'
        finally:
            provision_deployment.save()
            self.unlock(parent_deployment)
            parent_deployment.save()
            os.chdir(cwd)
            try:
                del os.environ['AWS_ACCESS_KEY_ID']
            except KeyError:
                print('no such env')
            try:
                del os.environ['AWS_SECRET_ACCESS_KEY']
            except KeyError:
                print('no such env')
            shutil.rmtree('result/{}'.format(grid_name))
Code Example #21
def test_run_multiproc_nondaemon_with_flag(nondaemon_flag):
    '''
    Start a pipe with two nodes using the multiproc plugin and passing the nondaemon_flag.
    '''
    
    cur_dir = os.getcwd()
    temp_dir = mkdtemp(prefix='test_engine_')
    os.chdir(temp_dir)
    
    pipe = pe.Workflow(name='pipe')
    
    f1 = pe.Node(interface=Function(function=TestInterface.testFunction, input_names=['sum'], output_names=['sum_out']), name='f1')
    f2 = pe.Node(interface=Function(function=TestInterface.testFunction, input_names=['sum'], output_names=['sum_out']), name='f2')

    pipe.connect([(f1,f2,[('sum_out','sum')])])
    pipe.base_dir = os.getcwd()
    f1.inputs.sum = 0
    
    # execute the pipe using the MultiProc plugin with 2 processes and the non_daemon flag
    # to enable child processes which start other multiprocessing jobs
    execgraph = pipe.run(plugin="MultiProc", plugin_args={'n_procs':2, 'non_daemon':nondaemon_flag})
    
    names = ['.'.join((node._hierarchy,node.name)) for node in execgraph.nodes()]
    node = execgraph.nodes()[names.index('pipe.f2')]
    result = node.get_output('sum_out')
    yield assert_equal, result, 180 # n_procs (2) * numberOfThreads (2) * 45 == 180
    os.chdir(cur_dir)
    rmtree(temp_dir)
Code Example #22
File: test_base.py Project: vsaase/nipype
def test_CommandLine_output():
    tmp_infile = setup_file()
    tmpd, name = os.path.split(tmp_infile)
    pwd = os.getcwd()
    os.chdir(tmpd)
    yield assert_true, os.path.exists(tmp_infile)
    ci = nib.CommandLine(command='ls -l')
    ci.inputs.terminal_output = 'allatonce'
    res = ci.run()
    yield assert_equal, res.runtime.merged, ''
    yield assert_true, name in res.runtime.stdout
    ci = nib.CommandLine(command='ls -l')
    ci.inputs.terminal_output = 'file'
    res = ci.run()
    yield assert_true, 'stdout.nipype' in res.runtime.stdout
    yield assert_equal, type(res.runtime.stdout), type('hi')
    ci = nib.CommandLine(command='ls -l')
    ci.inputs.terminal_output = 'none'
    res = ci.run()
    yield assert_equal, res.runtime.stdout, ''
    ci = nib.CommandLine(command='ls -l')
    res = ci.run()
    yield assert_true, 'stdout.nipype' in res.runtime.stdout
    os.chdir(pwd)
    teardown_file(tmpd)
Code Example #23
File: test_base.py Project: vsaase/nipype
def test_cycle_namesource1():
    tmp_infile = setup_file()
    tmpd, nme, ext = split_filename(tmp_infile)
    pwd = os.getcwd()
    os.chdir(tmpd)

    class spec3(nib.CommandLineInputSpec):
        moo = nib.File(name_source=['doo'], hash_files=False, argstr="%s",
                       position=1, name_template='%s_mootpl')
        poo = nib.File(name_source=['moo'], hash_files=False,
                       argstr="%s", position=2)
        doo = nib.File(name_source=['poo'], hash_files=False,
                       argstr="%s", position=3)

    class TestCycle(nib.CommandLine):
        _cmd = "mycommand"
        input_spec = spec3

    # Check that an exception is raised
    to0 = TestCycle()
    not_raised = True
    try:
        to0.cmdline
    except nib.NipypeInterfaceError:
        not_raised = False
    yield assert_false, not_raised

    os.chdir(pwd)
    teardown_file(tmpd)
Code Example #24
def main():
    args = prog_options()

    # Iterate through all directories and access their files
    for root, dirs, files in os.walk(args.sample_dir):
        if root != args.sample_dir:
            os.chdir(root)
            print root[24:27]

            # Get relevant file name
            for file in files:
                if file.endswith('.fastq'):
                    trim_out = file

            # Option to get counts of adapters in FLASh output merged file
            most_common_seqs = count_common_seqs(args.base_counts,
                                                 args.top_common_count,
                                                 trim_out)
            with open(args.out_fnh, 'w') as outf:
                outf.write('{}\n'.format(trim_out))
                outf.write('Read Count\tCommon Sequence\t'
                           'Common Sequence Counts\tPercent in reads')
                for d in most_common_seqs:
                    outf.write('{}\t{}\t{}\t{}'
                               .format(d[0], d[1], d[2], d[3]))
            return
Code Example #25
File: test_base.py Project: vsaase/nipype
def test_chained_namesource():
    tmp_infile = setup_file()
    tmpd, nme, ext = split_filename(tmp_infile)
    pwd = os.getcwd()
    os.chdir(tmpd)

    class spec2(nib.CommandLineInputSpec):
        doo = nib.File(exists=True, argstr="%s", position=1)
        moo = nib.File(name_source=['doo'], hash_files=False, argstr="%s",
                       position=2, name_template='%s_mootpl')
        poo = nib.File(name_source=['moo'], hash_files=False,
                       argstr="%s", position=3)

    class TestName(nib.CommandLine):
        _cmd = "mycommand"
        input_spec = spec2

    testobj = TestName()
    testobj.inputs.doo = tmp_infile
    res = testobj.cmdline
    yield assert_true, '%s' % tmp_infile in res
    yield assert_true, '%s_mootpl ' % nme in res
    yield assert_true, '%s_mootpl_generated' % nme in res

    os.chdir(pwd)
    teardown_file(tmpd)
Code Example #26
File: web_display.py Project: AnyMesh/anyMesh-Python
def daemonize( errfile ):
    """
    Detach process and become a daemon.
    """
    # First fork: the parent exits so the shell regains control.
    pid = os.fork()
    if pid:
        os._exit(0)

    # Become session leader and detach from the controlling terminal.
    os.setsid()
    signal.signal(signal.SIGHUP, signal.SIG_IGN)
    os.umask(0)

    # Second fork: the session leader exits so the daemon can never
    # reacquire a controlling terminal.
    pid = os.fork()
    if pid:
        os._exit(0)

    os.chdir("/")
    for fd in range(0,20):
        try:
            os.close(fd)
        except OSError:
            pass

    sys.stdin = open("/dev/null","r")
    sys.stdout = open("/dev/null","w")
    sys.stderr = ErrorLog( errfile )
Code Example #27
File: gitlab-backup.py Project: donhui/gitlab-backup
def backup_git_by_page(page, logger):
    git = get_gitlab_instance()
    projects = git.projects.all(page=page, per_page=100)
    git_data_path = GIT_SETTINGS.get('git_data_path')
    if 0 == len(projects):
        logger.info("All projects backup completed !")
        exit(0)
    else:
        logger.info("There are %s projects on page %s." % (len(projects), page))
        try:
            for project in projects:
                git_repo_path = os.path.join(git_data_path, project.path_with_namespace + ".git")
                logger.debug("begin to backup git repo %s !" % project.path_with_namespace)

                # if the project has been cloned, then run git fetch; otherwise run git clone.
                if os.path.exists(git_repo_path):
                    os.chdir(git_repo_path)
                    for output in os.popen("git fetch 2>&1"):
                        record_log_with_level(logger, output)
                else:
                    for output in os.popen("git clone --mirror %s %s 2>&1" % (project.http_url_to_repo, git_repo_path)):
                        record_log_with_level(logger, output)
        except:
            logger.exception('Got exception on logger handler:')
            raise
        logger.info("The projects of page %s backup completed !" % page)
Code Example #28
File: test_base.py Project: vsaase/nipype
def test_TraitedSpec_withNoFileHashing():
    tmp_infile = setup_file()
    tmpd, nme = os.path.split(tmp_infile)
    pwd = os.getcwd()
    os.chdir(tmpd)
    yield assert_true, os.path.exists(tmp_infile)
    class spec2(nib.TraitedSpec):
        moo = nib.File(exists=True, hash_files=False)
        doo = nib.traits.List(nib.File(exists=True))
    infields = spec2(moo=nme, doo=[tmp_infile])
    hashval = infields.get_hashval(hash_method='content')
    yield assert_equal, hashval[1], '8da4669ff5d72f670a46ea3e7a203215'

    class spec3(nib.TraitedSpec):
        moo = nib.File(exists=True, name_source="doo")
        doo = nib.traits.List(nib.File(exists=True))
    infields = spec3(moo=nme, doo=[tmp_infile])
    hashval1 = infields.get_hashval(hash_method='content')

    class spec4(nib.TraitedSpec):
        moo = nib.File(exists=True)
        doo = nib.traits.List(nib.File(exists=True))
    infields = spec4(moo=nme, doo=[tmp_infile])
    hashval2 = infields.get_hashval(hash_method='content')

    yield assert_not_equal, hashval1[1],  hashval2[1]
    os.chdir(pwd)
    teardown_file(tmpd)
Code Example #29
File: indexFiles.py Project: Acidburn0zzz/chump
    def processAux(self, dFrag):
        self.depth=self.depth+1
        if not self.files.has_key(self.depth):
            self.files[self.depth]=[]
        thisDir=self.compoundDir(self.topDir, dFrag)
        os.chdir(thisDir)
        self.theDict[thisDir]={'xml': [], 'bin': [], 'dir': []}
        # print "Processing",thisDir," Depth",self.depth
        thisDirContents=os.listdir(thisDir)
        for fname in thisDirContents:
            if stat.S_ISDIR(os.stat(fname)[stat.ST_MODE]):
                if not re.match("^(CVS|images|search|photos|htdig|\.)", fname) and self.depth<4:
                    self.processAux(self.compoundDir(dFrag,fname))
                    self.handleDir(thisDir, fname)
                    os.chdir(thisDir)
            else:
                # print "File",fname
                if re.match(".*\.xml$", fname):
                    self.handleXML(thisDir, dFrag, fname)
                elif re.match(".*\.(jpe?g|JPG|gif|png|html)$",
                              fname):
                    self.handleBinary(thisDir, fname)

        self.writeIndex(dFrag)
        self.depth=self.depth-1
Code Example #30
File: cacert_cp.py Project: jaeko44/agkyra
def main():
    os.chdir(os.path.dirname(os.path.realpath(__file__)))
    os.system("pip install certifi")

    print "Copying certifi's cacert.pem"
    import certifi
    shutil.copy2(certifi.where(), 'agkyra/resources/cacert.pem')
Code Example #31
def lag():

    os.chdir(dir_in)
    
    #get names
    tg_list_name = os.listdir()
    
    x = 808
    y = 809 
    
    
    for t in range(x, y):
        tg_name = tg_list_name[t]
        print(tg_name, '\n')
        
        # #check if the file exists
        # os.chdir(dir_out)
        # if (os.path.isfile(tg_name)):
        #     print('file already exists')
        #     continue
        
        #cd to where the actual file is 
        os.chdir(dir_in)
        
        pred = pd.read_csv(tg_name)
        pred.sort_values(by = 'date', inplace=True)
        pred.reset_index(inplace = True)
        pred.drop('index', axis = 1, inplace = True)
    
    
        #create a daily time series - date_range
        #get only the ymd of the start and end times
        start_time = pred['date'][0].split(' ')[0]
        end_time = pred['date'].iloc[-1].split(' ')[0]
        
        print(start_time, ' - ', end_time, '\n')
        
        date_range = pd.date_range(start_time, end_time, freq = 'D')
        
        
        #defining time changing lambda functions
        time_str = lambda x: str(x)
        time_converted_str = pd.DataFrame(map(time_str, date_range), columns = ['date'])
        time_converted_stamp = pd.DataFrame(date_range, columns = ['timestamp'])
        
                
        """
        first prepare the six time lagging dataframes  
        then use the merge function to merge the original 
        predictor with the lagging dataframes
        """
        
        #prepare lagged time series for time only
        #note here that since ERA20C has 3hrly data
        #the lag_hrs is increased from 6(eraint) to 11 (era20C)
        time_lagged = pd.DataFrame()
        lag_hrs = [0, 6, 12, 18, 24, 30]
        for lag in lag_hrs:
            lag_name = 'lag'+str(lag)
            lam_delta = lambda x: str(x - dt.timedelta(hours = lag))
            lag_new = pd.DataFrame(map(lam_delta, time_converted_stamp['timestamp']), \
                                   columns = [lag_name])
            time_lagged = pd.concat([time_lagged, lag_new], axis = 1)
            
        #dataframe that contains all lagged time series (time only)
        time_all = pd.concat([time_converted_str, time_lagged], axis = 1)
        

        pred_lagged = pd.DataFrame()
        for ii in range(1,time_all.shape[1]): #to loop through the lagged time series
            print(time_all.columns[ii])
            #extracting the corresponding lagged time series
            lag_ts = pd.DataFrame(time_all.iloc[:,ii]) 
            lag_ts.columns = ['date']
            #merge the selected lagged time with the predictor on "date"
            pred_new = pd.merge(pred, lag_ts, on = ['date'], how = 'right')
            pred_new.drop('Unnamed: 0', axis = 1, inplace = True)
            #sometimes nan values go to the bottom of the dataframe
            #sort df by date -> reset the index -> remove old index
            pred_new.sort_values(by = 'date', inplace=True)
            pred_new.reset_index(inplace=True)
            pred_new.drop('index', axis = 1, inplace= True)
            
            #concatenate lagged dataframe
            if ii == 1:
                pred_lagged = pred_new
            else: 
                pred_lagged = pd.concat([pred_lagged, pred_new.iloc[:,1:]], axis = 1)
                
        
        #cd to saving directory
        os.chdir(dir_out)
        pred_lagged.to_csv(tg_name)
        os.chdir(dir_in)
Code Example #32
csv.field_size_limit(100000000)
from itertools import islice
import cleaning_functions as cf

WA_rawdir='R:/FDA/2732 - FDA Tribal Tobacco Retailers/Technical/Task 2 - ' + \
            'Retailers List/Business Data/raw_data/WA'
WA_processdir='R:/FDA/2732 - FDA Tribal Tobacco Retailers/Technical/Task 2 - ' + \
            'Retailers List/Business Data/processed_data/WA'
FDA_dir = 'C:/Users/lpatterson/AnacondaProjects/Tribal_Master'
fda_df = pd.read_excel(FDA_dir + '/input/Public retail data_original.xlsx')
fda_df = fda_df.loc[fda_df['State'] == 'WA', :]
fda_df.reset_index(inplace=True)

# load WA tab-delimited data sets
os.chdir(WA_rawdir)
rawfiles = []
for file in glob.glob("*.txt"):
    rawfiles.append(file)

for k in rawfiles:
    # some inconsistencies with the tab delimited data, so we need to first process with csv
    file = open(k, 'rt', encoding="utf8")
    reader = csv.reader(file, delimiter='\t', quotechar=None)
    csv_list = []

    for m, l in enumerate(reader):
        if m % 100000 == 0:
            print(m)
        csv_list.append(l)
    # make dataframe with first row as column headers
Code Example #33
File: luatool.py Project: zuzu59/NodeMCU_Lua
        sys.exit(0)

    if args.dest is None:
        args.dest = basename(args.src)
        # zzz191020: print the file being sent
        print("File: " + args.src)

    # open source file for reading
    try:
        try:
            f = open(args.src, "rt")
        except:
            import os
            base_dir = os.path.dirname(os.path.realpath(__file__))
            f = open(os.path.join(base_dir, args.src), "rt")
            os.chdir(base_dir)
    except:
        sys.stderr.write("Could not open input file \"%s\"\n" % args.src)
        sys.exit(1)

    # Verify the selected file will not exceed the size of the serial buffer.
    # The size of the buffer is 256. This script does not accept files with
    # lines longer than 230 characters to have some room for command overhead.
    num_lines = 0
    for ln in f:
        if len(ln) > 230:
            sys.stderr.write(
                "File \"%s\" contains a line with more than 230 "
                "characters. This exceeds the size of the serial buffer.\n" %
                args.src)
            f.close()
Code Example #34
import os
from setuptools import find_packages, setup

with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme:
    README = readme.read()

# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))

setup(
    name='django-react-views',
    version='1.1',
    packages=find_packages(),
    include_package_data=True,
    license='MIT License',
    description='A Django app providing generic Class Based views and template tags that make it easy to use react '
                'alongside django views and templates.',
    long_description=README,
    long_description_content_type="text/markdown",
    author='Johnathan Ryan Hornik',
    author_email='*****@*****.**',
    classifiers=[
        'Environment :: Web Environment',
        'Framework :: Django',
        'Framework :: Django :: 1.11',  # replace "X.Y" as appropriate
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',  # example license
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
Code Example #35
import tempfile
import time
import traceback
import uuid

import six

import python_utils.dockerjob as dockerjob
import python_utils.jobset as jobset
import python_utils.report_utils as report_utils

# Docker doesn't clean up after itself, so we do it on exit.
atexit.register(lambda: subprocess.call(['stty', 'echo']))

ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
os.chdir(ROOT)

_FALLBACK_SERVER_PORT = 443
_BALANCER_SERVER_PORT = 12000
_BACKEND_SERVER_PORT = 8080

_TEST_TIMEOUT = 30

_FAKE_SERVERS_SAFENAME = 'fake_servers'

# Use a name that's verified by the test certs
_SERVICE_NAME = 'server.test.google.fr'


class CXXLanguage:
Code Example #36
############################### Imports ##################################################
import os
curdir = '/Users/Rui/Documents/Graduate/Research/Feature_Clustering/sidex20/'
os.chdir(curdir)

import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mds
import librosa
import librosa.display
import cv2
import copy
from sidex_load import sidex_readin
from Feature import conjoin
from feature_detection import feature_detection
from h_clustering import h_clustering
import convex_hull
from datetime import datetime

from save_obj import save_object

path = '/Users/Rui/Documents/Graduate/Research/SIDEX/SIDEX20/sidex20_calibration/'
FS = 1000
NUM_CHANNELS = 16

n_fft = 256
hop_length = int(n_fft / 2)
fmin = 0
fmax = 96  # max frequency to examine
n_mels = 32  #
mask_thres = 50  # noise level threshold to set 0-1 mask
Code Example #37
import os
import csv

directory = 'C:\Python27\my_scripts'
os.chdir(directory)

file  = "sgsgvoges002.2015-09-27_001827_732.log.txt"
file_tmp = "TEMP.log.txt"
file1 = "sgsgvoges002.2015-09-27_001827_732-EDITED.log.txt"
file2 = "CP_FW_log1_header.csv"
file3 = 'CP_FW_log1.csv'
file4 = 'CP_FW_log1_row_strip.csv'
file5 = 'CP_FW_log1_unique.csv'

print directory

def line_count(x):
    a = 0
    #if a < 6:
    with open(x, 'rb') as f:
        for line in f:
            a = a + 1
    print a

def preview_file(x):
    a = 0
    # if a < 6:
    with open(x, 'rb') as f:
        for line in f:
            a = a + 1
            if a < 6:
Code Example #38
    nightlyMPISuite.addTests(
        KratosUnittest.TestLoader().loadTestsFromTestCases(
            [FractionalStepKOmegaTest]))

    # adding monolithic k-omega-sst high re tests
    nightlyMPISuite.addTests(
        KratosUnittest.TestLoader().loadTestsFromTestCases(
            [MonolithicKOmegaSSTTest]))
    # adding fractional step k-omega-sst high re tests
    nightlyMPISuite.addTests(
        KratosUnittest.TestLoader().loadTestsFromTestCases(
            [FractionalStepKOmegaSSTTest]))

    ### Full MPI set ###########################################################
    allMPISuite = suites['mpi_all']
    allMPISuite.addTests(nightlyMPISuite)  # already contains the smallMPISuite

    allSuite = suites['all']
    allSuite.addTests(allMPISuite)

    return suites


if __name__ == '__main__':
    # this is required by the CI since, CI runs these tests from $KRATOS_HOME folder.
    os.chdir(os.path.dirname(os.path.abspath(__file__)))

    KratosMultiphysics.Logger.GetDefaultOutput().SetSeverity(
        KratosMultiphysics.Logger.Severity.WARNING)
    KratosUnittest.runTests(AssembleTestSuites())
Code Example #39
def setup():
    """Set up for testing: Change to temp dir, delete jsserver.properties if exists there."""
    os.chdir(REL_PATH_TEMPDIR)
    if os.path.exists(FNAME_JSSERVER_PROPS):
        os.remove(FNAME_JSSERVER_PROPS)
Code Example #40
#encoding:utf-8
import sys
import os

os.chdir(sys.path[0])
sys.path.append('../pub')
from Mydb import mysql
import redis


def loadcode():

    # add newly added records
    sql = "select * from wraith_whitelist"
    tmp = mysql.queryAll(sql)

    #print "blklist loaded!"
    r = redis.StrictRedis(host='localhost', port=6379, db=3)
    r.flushdb()
    for item in tmp:
        r.set(item['phone_number'], '1')
        #print r.get(item['code'])

    print 'Whitelist loaded successfully!'


if __name__ == "__main__":
    loadcode()
Code Example #41
File: test4_heatmap.py Project: human7/python_tool

import random
from pyecharts import options as opts
from pyecharts.charts import HeatMap
import os,sys

os.chdir(sys.path[0])  # change the working directory

def heatmap_car() -> HeatMap:
    x = ['BMW', 'Ferrari', 'Mercedes', 'Audi', 'Volkswagen', 'Toyota', 'Tesla']
    y = ['China', 'Japan', 'South Africa', 'Australia', 'Argentina', 'Algeria', 'France', 'Italy', 'Canada']
    value = [[i, j, random.randint(0, 100)]
             for i in range(len(x)) for j in range(len(y))]
    c = (
        HeatMap()
        .add_xaxis(x)
        .add_yaxis("销量", y, value)
        .set_global_opts(
            title_opts=opts.TitleOpts(title="HeatMap"),
            visualmap_opts=opts.VisualMapOpts(),
        )
    )
    return c

heatmap_car().render('heatmap_pyecharts.html')
Code Example #42
File: notify.py Project: 7gugu/duke-blog
#!/usr/bin/env python3
#
# Software License Agreement (MIT License)
# Author: Duke Fong <*****@*****.**>

import sys, os, json

sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from conf.gconf import gconf
from api.sub.crypt import *
from api.sub.helper import *

os.chdir(os.path.dirname(__file__))

# 'comment_id', 'url', 'title'
notify_tpl = lambda d: f"""\
<html><body>

<p>Hi {d['name']}, I have a new post on my blog, please have a look:</p>

<p>Title: {d['title']}</p>
<p><a href="{gconf['url']}/{d['url']}">{gconf['url']}/{d['url']}</a></p>

<p>If you don't want to continue receiving this, you can click the button next to your comment to close it:</p>
<p><a href="{gconf['url']}/subscribe">{gconf['url']}/subscribe</a></p>
<p>(If you're not currently logged in, you'll need to log in with your email address to continue.)</p>

<p>If you encounter any problems, you can email me at {gconf['admin'][1]} to get help.</p>

</body></html>
"""
Code Example #43
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import glob,os
import xlrd
import networkx as nx
import pandas as pd
import numpy as np
from my_network import remove_edges, read_edgelist, weight, all_graph_info
from my_plots import density, hist_plt,hist_log_bin
import matplotlib.pyplot as plt
from my_dataframes import drop_duplicate, drop_emptycell, stock_status


lower = []
os.chdir('/Users/shine/work_hard/financial_network/data/status_wind_612_710')
FileList = glob.glob('*.xlsx') #for workingfile in filelist
print FileList
print "FileList length is", len(FileList) #共20个文件
for workingfile in FileList:
	print workingfile,'is working now'
	filename  = workingfile.split('.')[0]
	print 'filename is', filename
	os.chdir('/Users/shine/work_hard/financial_network/data/status_wind_612_710')
	status = pd.ExcelFile(workingfile)
	status = pd.read_excel(status, 'Wind资讯'.decode('utf-8'))
	status = pd.DataFrame(status)
	status.columns = ['source', 'stock_name', 'limit_ori', 'sus_reason', 'sus_days', 'sus_ori']
	status['limit'] = status.apply (lambda df: stock_status(df, 'limit_ori', 'sus_ori'), axis=1)
	#print len(status) ## drop_emptycell and drop_duplicate checks show status has a lot of missing data; extracting only the columns needed for analysis may help
	df = status[['source', 'stock_name','limit']]
Code Example #44
clusjobs_contact_dets = dict()
#subprocess.call(["gunzip", "-kf", args.lig_file])
#ligand = os.path.abspath(args.lig_file[:-3])
ligand = os.path.abspath(args.lig_file)
pdb.set_trace()
subprocess.call([obabel, ligand, "-O{}".format(ligand)])
temperature, N = args.temperature, args.num_of_decoys
ft_options = glob.glob(os.path.join(DOCKING_RESULTS_PATH,\
        dock_jobs[0],'ft.*.*'))
ft_coef_opts = [os.path.basename(i).split('.')[1] for i in ft_options]
recs_and_mask_info = dict()
for ft_coef in ft_coef_opts:
    case_temp_cont_list = []
    for job in dock_jobs:
        job_path = os.path.join(DOCKING_RESULTS_PATH, job)
        os.chdir(job_path)
        print('Process job #' + job + ':')
        print('job_path: ', job_path)
        pymol.finish_launching(["pymol", "-qc"])
        #receptor = '../../' +job_complex_dict[job][0]
        #rec_mask = '../../'+job_complex_dict[job][1]
        #UNCOMMENT THE FOLLOWING FOR SCC
        #pdbfiles = glob.glob('*.pdb.gz')
        if job not in recs_and_mask_info:
            pdbfiles = glob.glob('*.pdb')
            recs_and_mask_info[job] = dict()
            if len(pdbfiles) == 2: # mask file and recpdb inside the path
                #mask_zipped = [i for i in pdbfiles if 'mask.pdb.gz' in i][0]
                #rec_mask = mask_zipped[:-3]
                #pdbfiles.remove(mask_zipped)
                rec_mask = [i for i in pdbfiles if 'mask.pdb' in i][0]
Code Example #45
parser = argparse.ArgumentParser("create scoring image")
parser.add_argument(
    "--output_image_location_file",
    type=str,
    help=("Name of a file to write image location to, "
          "in format REGISTRY.azurecr.io/IMAGE_NAME:IMAGE_VERSION"))
args = parser.parse_args()

model = Model(ws, name=e.model_name, version=e.model_version)
sources_dir = e.sources_directory_train
if (sources_dir is None):
    sources_dir = 'diabetes_regression'
path_to_scoring = os.path.join(".", sources_dir, "scoring")
cwd = os.getcwd()
os.chdir(path_to_scoring)

image_config = ContainerImage.image_configuration(
    execution_script=e.score_script,
    runtime="python",
    conda_file="conda_dependencies.yml",
    description="Image with ridge regression model",
    tags={
        "area": "diabetes",
        "type": "regression"
    },
)

image = Image.create(name=e.image_name,
                     models=[model],
                     image_config=image_config,
Code Example #46
File: conftest.py Project: tianyunwang/hail
def init(doctest_namespace):
    # This gets run once per process -- must avoid race conditions
    print("setting up doctest...")

    olddir = os.getcwd()
    os.chdir("docs/")

    doctest_namespace['hl'] = hl
    doctest_namespace['agg'] = agg

    if not os.path.isdir("output/"):
        try:
            os.mkdir("output/")
        except OSError:
            pass

    files = ["sample.vds", "sample.qc.vds", "sample.filtered.vds"]
    for f in files:
        if os.path.isdir(f):
            shutil.rmtree(f)

    ds = hl.read_matrix_table('data/example.vds')
    doctest_namespace['ds'] = ds
    doctest_namespace['dataset'] = ds
    doctest_namespace['dataset2'] = ds.annotate_globals(global_field=5)
    doctest_namespace['dataset_to_union_1'] = ds
    doctest_namespace['dataset_to_union_2'] = ds

    v_metadata = ds.rows().annotate_globals(global_field=5).annotate(consequence='SYN')
    doctest_namespace['v_metadata'] = v_metadata

    s_metadata = ds.cols().annotate(pop='AMR', is_case=False, sex='F')
    doctest_namespace['s_metadata'] = s_metadata

    # Table
    table1 = hl.import_table('data/kt_example1.tsv', impute=True, key='ID')
    table1 = table1.annotate_globals(global_field_1=5, global_field_2=10)
    doctest_namespace['table1'] = table1
    doctest_namespace['other_table'] = table1

    table2 = hl.import_table('data/kt_example2.tsv', impute=True, key='ID')
    doctest_namespace['table2'] = table2

    table4 = hl.import_table('data/kt_example4.tsv', impute=True,
                             types={'B': hl.tstruct(B0=hl.tbool, B1=hl.tstr),
                                    'D': hl.tstruct(cat=hl.tint32, dog=hl.tint32),
                                    'E': hl.tstruct(A=hl.tint32, B=hl.tint32)})
    doctest_namespace['table4'] = table4

    people_table = hl.import_table('data/explode_example.tsv', delimiter='\\s+',
                                   types={'Age': hl.tint32, 'Children': hl.tarray(hl.tstr)})
    doctest_namespace['people_table'] = people_table

    # TDT
    doctest_namespace['tdt_dataset'] = hl.import_vcf('data/tdt_tiny.vcf')

    ds2 = hl.variant_qc(ds)
    doctest_namespace['ds2'] = ds2.select_rows(AF=ds2.variant_qc.AF)

    # Expressions
    doctest_namespace['names'] = hl.literal(['Alice', 'Bob', 'Charlie'])
    doctest_namespace['a1'] = hl.literal([0, 1, 2, 3, 4, 5])
    doctest_namespace['a2'] = hl.literal([1, -1, 1, -1, 1, -1])
    doctest_namespace['t'] = hl.literal(True)
    doctest_namespace['f'] = hl.literal(False)
    doctest_namespace['na'] = hl.null(hl.tbool)
    doctest_namespace['call'] = hl.call(0, 1, phased=False)
    doctest_namespace['a'] = hl.literal([1, 2, 3, 4, 5])
    doctest_namespace['d'] = hl.literal({'Alice': 43, 'Bob': 33, 'Charles': 44})
    doctest_namespace['interval'] = hl.interval(3, 11)
    doctest_namespace['locus_interval'] = hl.parse_locus_interval("1:53242-90543")
    doctest_namespace['locus'] = hl.locus('1', 1034245)
    doctest_namespace['x'] = hl.literal(3)
    doctest_namespace['y'] = hl.literal(4.5)
    doctest_namespace['s1'] = hl.literal({1, 2, 3})
    doctest_namespace['s2'] = hl.literal({1, 3, 5})
    doctest_namespace['s3'] = hl.literal({'Alice', 'Bob', 'Charlie'})
    doctest_namespace['struct'] = hl.struct(a=5, b='Foo')
    doctest_namespace['tup'] = hl.literal(("a", 1, [1, 2, 3]))
    doctest_namespace['s'] = hl.literal('The quick brown fox')
    doctest_namespace['interval2'] = hl.Interval(3, 6)

    # Overview
    doctest_namespace['ht'] = hl.import_table("data/kt_example1.tsv", impute=True)
    doctest_namespace['mt'] = ds

    gnomad_data = ds.rows()
    doctest_namespace['gnomad_data'] = gnomad_data.select(gnomad_data.info.AF)

    # BGEN
    bgen = hl.import_bgen('data/example.8bits.bgen',
                          entry_fields=['GT', 'GP', 'dosage'])
    doctest_namespace['variants_table'] = bgen.rows()

    print("finished setting up doctest...")
    yield
    os.chdir(olddir)
Code Example #47
def _restore_working_dir():
    curdir = os.getcwd()
    try:
        yield
    finally:
        os.chdir(curdir)
Code Example #48
        os.chdir(olddir)
    return False


def main(args):
    parser = optparse.OptionParser()
    options, args = parser.parse_args(args)

    olddir = os.getcwd()
    try:
        os.chdir(srcdir)

        try:
            deps_js = generate_deps_js()
        except parse_deps.DepsException, ex:
            sys.stderr.write("Error: %s\n\n" % str(ex))
            return 255

        o = open(os.path.join(srcdir, "deps.js"), 'w')
        o.write(deps_js)
        o.close()

    finally:
        os.chdir(olddir)

    return 0


if __name__ == "__main__":
    sys.exit(main(sys.argv))
Code Example #49
import argparse
import os
from yolo_detect.yolo import YOLO
from yolo_detect.detect_video import detect_video
from PIL import Image

current_path = os.path.dirname(os.path.abspath(__file__))
path_suffix = '/apps'
if current_path.endswith(path_suffix):
    parent_path = current_path.rsplit(path_suffix, 1)[0]
    os.chdir(parent_path)


def detect_img(yolo):
    while True:
        img = input('Input image filename:')
        try:
            image = Image.open(img)
        except:
            print('Open Error! Try again!')
            continue
        else:
            r_image = yolo.detect_image(image)
            r_image.show()
    yolo.close_session()


FLAGS = None

if __name__ == '__main__':
    # class YOLO defines the default value, so suppress any default here
Code Example #50
    def tearDown(self):
        shutil.rmtree(self.test_tmpdir)
        os.chdir(self.old_dir)
Code Example #51
def verify_package(verbose=True):
    """Ensure the package has a config file and a valid experiment file."""
    is_passing = True

    # Check for existence of required files.
    required_files = [
        "config.txt",
        "experiment.py",
        "requirements.txt",
    ]

    for f in required_files:
        if os.path.exists(f):
            log("✓ {} is PRESENT".format(f), chevrons=False, verbose=verbose)
        else:
            log("✗ {} is MISSING".format(f), chevrons=False, verbose=verbose)
            is_passing = False

    # Check the experiment file.
    if os.path.exists("experiment.py"):

        # Check if the experiment file has exactly one Experiment class.
        tmp = tempfile.mkdtemp()
        for f in ["experiment.py", "config.txt"]:
            shutil.copyfile(f, os.path.join(tmp, f))

        cwd = os.getcwd()
        os.chdir(tmp)

        open("__init__.py", "a").close()
        exp = imp.load_source('experiment', os.path.join(tmp, "experiment.py"))

        classes = inspect.getmembers(exp, inspect.isclass)
        exps = [
            c for c in classes if (c[1].__bases__[0].__name__ in "Experiment")
        ]

        if len(exps) == 0:
            log("✗ experiment.py does not define an experiment class.",
                delay=0,
                chevrons=False,
                verbose=verbose)
            is_passing = False
        elif len(exps) == 1:
            log("✓ experiment.py defines 1 experiment",
                delay=0,
                chevrons=False,
                verbose=verbose)
        else:
            log("✗ experiment.py defines more than one experiment class.",
                delay=0,
                chevrons=False,
                verbose=verbose)
        os.chdir(cwd)

    # Make sure there's a help file.
    is_txt_readme = os.path.exists("README.md")
    is_md_readme = os.path.exists("README.txt")
    if (not is_md_readme) and (not is_txt_readme):
        is_passing = False
        log("✗ README.txt or README.md is MISSING.",
            delay=0,
            chevrons=False,
            verbose=verbose)
    else:
        log("✓ README is OK", delay=0, chevrons=False, verbose=verbose)

    # Check front-end files do not exist
    files = [
        os.path.join("templates", "complete.html"),
        os.path.join("templates", "error.html"),
        os.path.join("templates", "launch.html"),
        os.path.join("templates", "thanks.html"),
        os.path.join("static", "css", "dallinger.css"),
        os.path.join("static", "scripts", "dallinger.js"),
        os.path.join("static", "scripts", "reqwest.min.js"),
        os.path.join("static", "robots.txt")
    ]

    for f in files:
        if os.path.exists(f):
            log("✗ {} will CONFLICT with shared front-end files inserted at run-time, "
                "please delete or rename.".format(f),
                delay=0,
                chevrons=False,
                verbose=verbose)
            return False

    log("✓ no file conflicts", delay=0, chevrons=False, verbose=verbose)

    return is_passing
Code Example #52
"""
Created on Wed Dec  2 20:27:48 2020

@author: caghangir
"""

import os
import mne
import numpy as np
import matplotlib.pyplot as plt
import extremeEEGSignalAnalyzer as chetto_EEG
import pickle
# from mne.preprocessing import ICA
chetto_EEG = chetto_EEG.extremeEEGSignalAnalyzer()
# %matplotlib qt
os.chdir('/home/caghangir/Desktop/PhD/Lucid Dream EEG/Extracted Dataset/Epochs/ICA Yes, SSP Yes, CSD Yes')

#%%=== MNE Configuration =======
try:
    mne.set_config('MNE_USE_CUDA', True)
except TypeError as err:
    print(err)
print(mne.get_config())  # same as mne.get_config(key=None)
print(mne.get_config('MNE_USE_CUDA'))
#%% ========== Lucidity Period Taking (Pre-Lucid-Post) ===========
# folder_path_o_kniebeugen = 'C:/Users/caghangir/Desktop/PhD/Research/Lucid Dream EEG/Total Data/Datasets/2_Christian_Tausch/Final EDF Files'
# folder_path_FILD = 'C:/Users/caghangir/Desktop/PhD/Research/Lucid Dream EEG/Total Data/Datasets/FILD_Raw_data/Lucidity Found'
# folder_path_Jarrot = 'C:/Users/caghangir/Desktop/PhD/Research/Lucid Dream EEG/Total Data/Datasets/LD_EEG_Jarrot/EDF Files'
# folder_path_Sergio = 'C:/Users/caghangir/Desktop/PhD/Research/Lucid Dream EEG/Total Data/Datasets/Sergio'

#pre-lucid-post  (203,301,302,303,304_2,501,603_2)
コード例 #53
0
def main(argv=sys.argv):
    log = logging.getLogger('heat-config')
    handler = logging.StreamHandler(sys.stderr)
    handler.setFormatter(
        logging.Formatter(
            '[%(asctime)s] (%(name)s) [%(levelname)s] %(message)s'))
    log.addHandler(handler)
    log.setLevel('DEBUG')

    prepare_dir(OUTPUTS_DIR)
    prepare_dir(WORKING_DIR)
    os.chdir(WORKING_DIR)

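    # The deployment payload is read as JSON from stdin.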
    c = json.load(sys.stdin)

    use_hiera = c['options'].get('enable_hiera', False)
    use_facter = c['options'].get('enable_facter', True)
    modulepath = c['options'].get('modulepath')
    tags = c['options'].get('tags')

    facts = {}
    hiera = {}

    fqdn = get_hostname_f(log)
    if fqdn:
        facts['FACTER_fqdn'] = fqdn

    for input in c['inputs']:
        input_name = input['name']
        input_value = input.get('value', '')
        if use_facter:
            fact_name = 'FACTER_%s' % input_name
            facts[fact_name] = input_value
        if use_hiera:
            hiera[input_name] = input_value

    if use_hiera:
        prepare_dir(HIERA_DATADIR)
        hiera_data = os.path.join(HIERA_DATADIR,
                                  'heat_config_%s.json' % c['name'])
        with os.fdopen(os.open(hiera_data,
                               os.O_CREAT | os.O_TRUNC | os.O_WRONLY, 0o600),
                       'w') as hiera_file:
            hiera_file.write(json.dumps(hiera).encode('utf8'))
        facts['FACTER_deploy_config_name'] = c['name']

    fn = os.path.join(WORKING_DIR, '%s.pp' % c['id'])
    heat_outputs_path = os.path.join(OUTPUTS_DIR, c['id'])
    facts['FACTER_heat_outputs_path'] = heat_outputs_path

    env_debug = ' '.join('%s="%s" ' % (k, v) for k, v in facts.items())

    env = os.environ.copy()
    env.update(facts)

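    # Write the Puppet manifest with owner-only (0o700) permissions.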
    with os.fdopen(os.open(fn, os.O_CREAT | os.O_TRUNC | os.O_WRONLY, 0o700),
                   'w') as f:
        f.write(c.get('config', '').encode('utf-8'))
    cmd = [PUPPET_CMD, 'apply', '--detailed-exitcodes', fn]
    if modulepath:
        cmd.insert(-1, '--modulepath')
        cmd.insert(-1, modulepath)
    if tags:
        cmd.insert(-1, '--tags')
        cmd.insert(-1, tags)
    log.debug('Running %s %s' % (env_debug, ' '.join(cmd)))
    try:
        subproc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE, env=env)
    except OSError:
        log.warning('puppet not installed yet')
        return
    stdout, stderr = subproc.communicate()

    log.info('Return code %s' % subproc.returncode)
    if stdout:
        log.info(stdout)
    if stderr:
        log.info(stderr)

    # A return code of 2 means there were successful changes.
    if subproc.returncode in (0, 2):
        returncode = 0
        log.info('Completed %s' % fn)
    else:
        returncode = subproc.returncode
        log.error("Error running %s. [%s]\n" % (fn, subproc.returncode))

    response = {}

    for output in c.get('outputs') or []:
        output_name = output['name']
        try:
            with open('%s.%s' % (heat_outputs_path, output_name)) as out:
                response[output_name] = out.read()
        except IOError:
            pass

    response.update({
        'deploy_stdout': stdout,
        'deploy_stderr': stderr,
        'deploy_status_code': returncode,
    })

    json.dump(response, sys.stdout)
コード例 #54
0
def setup_experiment(debug=True, verbose=False, app=None, exp_config=None):
    """Check the app and, if compatible with Dallinger, freeze its state."""
    print_header()

    # Verify that the package is usable.
    log("Verifying that directory is compatible with Dallinger...")
    if not verify_package(verbose=verbose):
        raise AssertionError(
            "This is not a valid Dallinger app. " +
            "Fix the errors and then try running 'dallinger verify'.")

    # Verify that the Postgres server is running.
    try:
        psycopg2.connect(database="x", user="******", password="******")
    except psycopg2.OperationalError as e:
        if "could not connect to server" in str(e):
            raise RuntimeError("The Postgres server isn't running.")

    # Load configuration.
    config = get_config()
    if not config.ready:
        config.load_config()

    # Check that the demo-specific requirements are satisfied.
    try:
        with open("requirements.txt", "r") as f:
            dependencies = [r for r in f.readlines() if r[:3] != "-e "]
    except (IOError, OSError):
        dependencies = []

    pkg_resources.require(dependencies)

    # Generate a unique id for this experiment.
    generated_uid = public_id = str(uuid.uuid4())

    # If the user provided an app name, use it everywhere that's user-facing.
    if app:
        public_id = str(app)

    log("Experiment id is " + public_id + "")

    # Copy this directory into a temporary folder, ignoring .git
    dst = os.path.join(tempfile.mkdtemp(), public_id)
    to_ignore = shutil.ignore_patterns(os.path.join(".git", "*"), "*.db",
                                       "snapshots", "data", "server.log")
    shutil.copytree(os.getcwd(), dst, ignore=to_ignore)

    click.echo(dst)

    # Save the experiment id
    with open(os.path.join(dst, "experiment_id.txt"), "w") as file:
        file.write(generated_uid)

    # Change directory to the temporary folder.
    cwd = os.getcwd()
    os.chdir(dst)

    # Write the custom config
    if exp_config:
        config.extend(exp_config)

    config.write_config(filter_sensitive=True)

    # Zip up the temporary directory and place it in the cwd.
    if not debug:
        log("Freezing the experiment package...")
        shutil.make_archive(
            os.path.join(cwd, "snapshots", public_id + "-code"), "zip", dst)

    # Check directories.
    if not os.path.exists(os.path.join("static", "scripts")):
        os.makedirs(os.path.join("static", "scripts"))
    if not os.path.exists("templates"):
        os.makedirs("templates")
    if not os.path.exists(os.path.join("static", "css")):
        os.makedirs(os.path.join("static", "css"))

    # Rename experiment.py for backwards compatibility.
    os.rename(os.path.join(dst, "experiment.py"),
              os.path.join(dst, "dallinger_experiment.py"))

    # Get dallinger package location.
    from pkg_resources import get_distribution
    dist = get_distribution('dallinger')
    src_base = os.path.join(dist.location, dist.project_name)

    heroku_files = [
        "Procfile",
        "launch.py",
        "worker.py",
        "clock.py",
    ]

    for filename in heroku_files:
        src = os.path.join(src_base, "heroku", filename)
        shutil.copy(src, os.path.join(dst, filename))

    clock_on = config.get('clock_on', False)

    # If the clock process has been disabled, overwrite the Procfile.
    if not clock_on:
        src = os.path.join(src_base, "heroku", "Procfile_no_clock")
        shutil.copy(src, os.path.join(dst, "Procfile"))

    frontend_files = [
        os.path.join("static", "css", "dallinger.css"),
        os.path.join("static", "scripts", "dallinger.js"),
        os.path.join("static", "scripts", "reqwest.min.js"),
        os.path.join("templates", "error.html"),
        os.path.join("templates", "launch.html"),
        os.path.join("templates", "complete.html"),
        os.path.join("templates", "thanks.html"),
        os.path.join("static", "robots.txt")
    ]

    for filename in frontend_files:
        src = os.path.join(src_base, "frontend", filename)
        shutil.copy(src, os.path.join(dst, filename))

    time.sleep(0.25)

    os.chdir(cwd)

    return (public_id, dst)
コード例 #55
0
from os.path import dirname, abspath
from os import chdir
from time import time
import numpy as np
from numpy.linalg import norm
import message_filters
import struct
import json
import cv2
import config
from utils import draw_angled_text
import warnings

# Change directory to the file directory
dir_path = dirname(abspath(__file__))
chdir(dir_path)

# Except warnings as errors
warnings.filterwarnings("error")

# TODO: check hold functionality for multiple objects

class PlanarPoseEstimation():
    def __init__(self, config):
        """
        Generates poses for detected objects.

        Parameters
        ----------
        config : object
            Contains the configuration for the class.
コード例 #56
0
def deploy_sandbox_shared_setup(verbose=True,
                                app=None,
                                web_procs=1,
                                exp_config=None):
    """Set up Git, push to Heroku, and launch the app."""
    if verbose:
        out = None
    else:
        out = open(os.devnull, 'w')

    (id, tmp) = setup_experiment(debug=False,
                                 verbose=verbose,
                                 app=app,
                                 exp_config=exp_config)

    # Register the experiment using all configured registration services.
    config = get_config()
    if config.get("mode") == u"live":
        log("Registering the experiment on configured services...")
        registration.register(id, snapshot=None)

    # Log in to Heroku if we aren't already.
    log("Making sure that you are logged in to Heroku.")
    heroku.log_in()
    click.echo("")

    # Change to temporary directory.
    cwd = os.getcwd()
    os.chdir(tmp)

    # Commit Heroku-specific files to tmp folder's git repo.
    cmds = [
        "git init", "git add --all", 'git commit -m "Experiment ' + id + '"'
    ]
    for cmd in cmds:
        subprocess.check_call(cmd, stdout=out, shell=True)
        time.sleep(0.5)

    # Load configuration.
    config = get_config()
    if not config.ready:
        config.load_config()

    # Initialize the app on Heroku.
    log("Initializing app on Heroku...")

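    # Build "heroku apps:create" with a SciPy-capable buildpack.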
    create_cmd = [
        "heroku",
        "apps:create",
        app_name(id),
        "--buildpack",
        "https://github.com/thenovices/heroku-buildpack-scipy",
    ]

    # If a team is specified, assign the app to the team.
    try:
        team = config.get("heroku_team", None)
        if team:
            create_cmd.extend(["--org", team])
    except Exception:
        pass

    subprocess.check_call(create_cmd, stdout=out)
    database_size = config.get('database_size')

    try:
        if config.get('whimsical'):
            whimsical = "true"
        else:
            whimsical = "false"
    except:
        whimsical = "false"

    # Set up postgres database and AWS environment variables.
    cmds = [
        "heroku addons:create heroku-postgresql:{}".format(
            quote(database_size)),
        "heroku pg:wait",
        "heroku addons:create heroku-redis:premium-0",
        "heroku addons:create papertrail",
        "heroku config:set HOST=" + app_name(id) + ".herokuapp.com",
        "heroku config:set aws_access_key_id=" +
        quote(config.get('aws_access_key_id')),
        "heroku config:set aws_secret_access_key=" +
        quote(config.get('aws_secret_access_key')),
        "heroku config:set aws_region=" + quote(config.get('aws_region')),
        "heroku config:set auto_recruit={}".format(config.get('auto_recruit')),
        "heroku config:set dallinger_email_username="******"heroku config:set dallinger_email_key=" +
        quote(config.get('dallinger_email_password')),
        "heroku config:set heroku_email_address=" +
        quote(config.get('heroku_email_address')),
        "heroku config:set heroku_password="******"heroku config:set whimsical={}".format(whimsical),
    ]
    for cmd in cmds:
        subprocess.check_call(cmd + " --app " + app_name(id),
                              stdout=out,
                              shell=True)

    # Wait for Redis database to be ready.
    log("Waiting for Redis...")
    ready = False
    while not ready:
        redis_URL = subprocess.check_output(
            "heroku config:get REDIS_URL --app {}".format(app_name(id)),
            shell=True)
        r = redis.from_url(redis_URL)
        try:
            r.set("foo", "bar")
            ready = True
        except redis.exceptions.ConnectionError:
            time.sleep(2)

    log("Saving the URL of the postgres database...")
    db_url = subprocess.check_output("heroku config:get DATABASE_URL --app " +
                                     app_name(id),
                                     shell=True)
    # Set the notification URL and database URL in the config file.
    config.extend({
        "notification_url":
        u"http://" + app_name(id) + ".herokuapp.com/notifications",
        "database_url": db_url.rstrip().decode('utf8'),
    })
    config.write_config()

    subprocess.check_call("git add config.txt", stdout=out, shell=True),
    time.sleep(0.25)
    subprocess.check_call(
        'git commit -m "Save URLs for database and notifications"',
        stdout=out,
        shell=True)
    time.sleep(0.25)

    # Launch the Heroku app.
    log("Pushing code to Heroku...")
    subprocess.check_call("git push heroku HEAD:master",
                          stdout=out,
                          stderr=out,
                          shell=True)

    log("Scaling up the dynos...")
    scale_up_dynos(app_name(id))

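    # Give the dynos a moment to boot before requesting the launch endpoint.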
    time.sleep(8)

    # Launch the experiment.
    log("Launching the experiment on MTurk...")

    launch_request = requests.post('https://{}.herokuapp.com/launch'.format(
        app_name(id)))
    launch_data = launch_request.json()

    log("URLs:")
    log("App home: https://{}.herokuapp.com/".format(app_name(id)),
        chevrons=False)
    log("Initial recruitment: {}".format(
        launch_data.get('recruitment_url', None)),
        chevrons=False)

    # Return to the branch whence we came.
    os.chdir(cwd)

    log("Completed deployment of experiment " + id + ".")
コード例 #57
0
def regex_searcher():
    list_files = []
    for i in os.listdir(os.getcwd()):
        if os.path.isfile(i):
            list_files.append(i)
    for file in list_files:
        mo = regex_date.search(file)
        if mo:
            # Swap the second and third capture groups (month and day in a
            # US-style date) to build the EU-style filename.
            part1, part2, part3, part4, part5 = mo.groups()
            filename = part1 + part3 + "-" + part2 + "-" + part4 + part5
            rename_us_to_eu(file, filename)
        else:
            print("no date pattern in: " + file)


if __name__ == "__main__":
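    # Run everything inside a tmp/ sandbox directory.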
    if os.path.isdir("tmp"):
        os.chdir("tmp")
    else:
        os.mkdir("tmp")
        os.chdir("tmp")
    path = os.path.abspath(os.getcwd())
    touch_n_times(path)
    regex_searcher()
コード例 #58
0
ファイル: launch.py プロジェクト: Mhatt12/ChaosRising
mdir = mdir.replace("mdir:", "")
cdir = all_lines[2].replace("\n", "").replace("cdir:", "")
port = all_lines[3].replace("\n", "").replace("port:", "")
byonddir = all_lines[4].replace("\n", "").replace("byond_dir:", "")
print(port)
t1 = time.time()

print("Updating git...")

os.chdir("{}civ13-git".format(mdir))
os.system("git pull")
os.system("git reset --hard origin/master")

print("Rebuilding binaries...")

os.system("{}/bin/dm.exe civ13.dme".format(byonddir))

os.system("cd")

print("Copying configuration settings...")

os.system("python3 {}{}scripts/copyconfigfiles.py".format(mdir, cdir))

print("Copying binaries...")
コード例 #59
0
ファイル: demo.py プロジェクト: Shyonokaze/snippets
import os
import argparse

import numpy as np
import matplotlib.pyplot as plt

from PIL import Image

import DM3lib as dm3

from utilities import calcHistogram, calcDisplayRange

# CONSTANTS

homDir = os.environ['HOME']
relDir = "/work/snippets/py3-DM3reader"
wd = homDir + relDir
os.chdir(wd)

debug = 0

# define command line arguments
parser = argparse.ArgumentParser()

parser.add_argument("file", help="path to DM3 file to parse")
parser.add_argument("-v",
                    "--verbose",
                    help="increase output verbosity",
                    action="store_true")
parser.add_argument("--dump",
                    help="dump DM3 tags in text file",
                    action="store_true")
parser.add_argument("--convert",
from sklearn.naive_bayes import MultinomialNB
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.model_selection import cross_val_score
from sklearn.svm import LinearSVC
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import SGDClassifier
from sklearn.pipeline import Pipeline
from sklearn.multiclass import OneVsRestClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score

import os
import pandas as pd

os.chdir('D:/HACKATHONS/news article freelancing')
os.getcwd()

df = pd.read_csv("movies_genres.csv", delimiter='\t')
df_genres = df.drop(['plot', 'title'], axis=1)
counts = []
categories = list(df_genres.columns.values)

#Checking number of movies per category
for i in categories:
    counts.append((i, df_genres[i].sum()))
df_stats = pd.DataFrame(counts, columns=['genre', '#movies'])
df_stats.plot(x='genre',
              y='#movies',
              kind='bar',
              legend=False,