Example #1
 def __init__(self, plugin_path):
     self.plugin_path = plugin_path
     self.pre_build_hook_path = join_path(plugin_path, 'pre_build_hook')
     self.meta = utils.parse_yaml(join_path(plugin_path, 'metadata.yaml'))
     self.build_dir = join_path(plugin_path, '.build')
     self.build_src_dir = join_path(self.build_dir, 'src')
     self.checksums_path = join_path(self.build_src_dir, 'checksums.sha1')
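Throughout these examples, join_path is presumably os.path.join imported under an alias; a minimal sketch of the assumed import:

from os.path import join as join_path  # assumption: the alias used by every example below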
Example #2
    def _make_data_for_template(self):
        """Generates data for spec template

        :returns: dictionary with required data
        """
        data = {
            'name': self.full_name,
            'version': self.full_version,
            'summary': self.meta['title'],
            'description': self.meta['description'],
            'license': ' and '.join(self.meta.get('licenses', [])),
            'homepage': self.meta.get('homepage'),
            'vendor': ', '.join(self.meta.get('authors', [])),
            'year': utils.get_current_year()}

        uninst = utils.read_if_exist(
            join_path(self.plugin_path, "uninstall.sh"))

        preinst = utils.read_if_exist(
            join_path(self.plugin_path, "pre_install.sh"))

        postinst = utils.read_if_exist(
            join_path(self.plugin_path, "post_install.sh"))

        data.update(
            {'postinstall_hook': postinst,
             'preinstall_hook': preinst,
             'uninstall_hook': uninst}
        )

        return data
Example #3
def create_structure(struct, prefix=None, update=False):
    """
    Manifests a directory structure in the filesystem

    :param struct: directory structure as dictionary of dictionaries
    :param prefix: prefix path for the structure
    :param update: update an existing directory structure as boolean
    """
    if prefix is None:
        prefix = os.getcwd()
    for name, content in struct.items():
        if isinstance(content, string_types):
            with open(join_path(prefix, name), "w") as fh:
                fh.write(utils.utf8_encode(content))
        elif isinstance(content, dict):
            try:
                os.mkdir(join_path(prefix, name))
            except OSError:
                if not update:
                    raise
            create_structure(struct[name],
                             prefix=join_path(prefix, name),
                             update=update)
        elif content is None:
            pass
        else:
            raise RuntimeError("Don't know what to do with content type "
                               "{type}.".format(type=type(content)))
Example #4
File: backup.py  Project: 52nlp/brat
def _backup(min_interval=MIN_INTERVAL, backup_dir=BACKUP_DIR, data_dir=DATA_DIR):
    b_file, b_mtime = _youngest_backup(backup_dir)
    y_mtime = _datetime_mtime(data_dir)  # use the data_dir argument, not the module-level DATA_DIR
    #_, y_mtime = _youngest_file(data_dir)
    # If we have a back-up archive and no file has changed since the back-up,
    #       or the delay has not expired, return
    if b_file is not None and (y_mtime <= b_mtime
            or (y_mtime - b_mtime) < min_interval):
        return

    # Here we do use UTC
    backup_filename = (_safe_dirname(data_dir) + '-'
            + datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
            + '.' + TAR_GZ_SUFFIX)
    backup_path = abspath(join_path(backup_dir, backup_filename))
    data_dir_parent = join_path(data_dir, '../')

    #TODO: Check the exit signals!
    cmd = 'tar -c -z -f %s -C %s %s' % (backup_path,
        data_dir_parent, _safe_dirname(data_dir))
    tar_p = Popen(split_shlex(cmd))
    tar_p.wait()

    checksum_base = join_path(backup_dir, CHECKSUM_FILENAME)
    with open(checksum_base + '.' + 'MD5', 'a') as md5_file:
        # *NIX could have used md5sum instead
        md5_cmd = 'md5sum %s' % (backup_filename)
        md5_p = Popen(split_shlex(md5_cmd), stdout=md5_file, cwd=backup_dir)
        md5_p.wait()

    with open(checksum_base + '.' + 'SHA256', 'a') as sha256_file:
        sha256_cmd = 'shasum -a 256 %s' % (backup_filename)
        sha256_p = Popen(split_shlex(sha256_cmd), stdout=sha256_file, cwd=backup_dir)
        sha256_p.wait()
Example #5
 def test(prefix):
     sofile = join_path(prefix, 'lib/{}'.format(soname))
     incdir = join_path(prefix, 'include/{}'.format(incname))
     if os.path.isfile(sofile) and os.path.isdir(incdir):
         return (join_path(prefix, 'lib'),
                 join_path(prefix, 'include'))
     return None
Example #6
def edgeR_job(project_path, groups, output, collectjobID, defaults, ppn='1', walltime="12:00:00", filter_threshold=0.5):
    '''Prepare inputs for edgeR and generate a job script'''

    # Generate conditions input file
    condf = open(abspath(join_path(project_path, 'inputs', 'conditions_Rready.csv')), 'w')
    condf.writelines('Sample,Strain,Treatment\n')
    condf.writelines(
        [','.join([sample, 'strain', group + '\n']) for group, samples in groups.items() for sample in samples.keys()])
    condf.close()

    jobstr = []
    jobstr += [job_header.replace('JOBNAME', 'edgeR')
                   .replace('WALLTIME', walltime)
                   .replace('PROJECT', defaults['project'])
                   .replace('DEPEND', 'afterok:{}'.format(collectjobID))
                   .replace('JOB_OUTPUTS', abspath(join_path(project_path, 'job_outputs')))
                   .replace('EMAILADDRESS', defaults['email'])]

    jobstr += ['Rscript {}/edgeR_script.r -p {} -c {} -s {} -o {} -f {}'.format(abspath(join_path(iLoop_RNAseq_pipeline.__path__[0], 'scripts')),
                                                                           project_path,
                                                                           abspath(join_path(project_path,
                                                                                             'results',
                                                                                             'featureCounts_collected.csv')),
                                                                           abspath(join_path(project_path,
                                                                                             'inputs',
                                                                                             'conditions_Rready.csv')),
                                                                           output,
                                                                           str(filter_threshold))]

    return '\n\n'.join(jobstr).replace('PPN', ppn)
Example #7
def compile_styles(file=join_path(ROOT, 'less', 'default.less')):
    with open(file, encoding='utf-8') as raw_file:
        raw_text = raw_file.read()

    css = lesscpy.compile(StringIO(raw_text))
    with open(join_path(ROOT, 'css', 'default.css'), 'w', encoding='utf-8') as css_file:
        css_file.write(css)
Example #8
def pack_html(html, styles=None, poster='', banner=''):
    if not styles: styles = [join_path(ROOT, 'css','default.css')]
    styles.append(join_path(ROOT, 'css','custom.css'))
    style_tags = ['<link rel="stylesheet" type="text/css" href="{}">'.format(sheet)
         for sheet in styles]

    if len(poster.strip()) > 0:
        poster_tag = '\n<br>\n<img src="{}" alt="poster"/>'.format(poster)
    else: poster_tag = ''

    if len(banner.strip()) > 0:
        banner_tag = '<img src="{}" alt="banner"/>'.format(banner)
    else: banner_tag = ''

    head = """<!DOCTYPE html><html lang="zh-cn">
          <head>
          <meta charset="UTF-8">
          <title>result</title>
          {styles}
          </head>
          <body>
          <div class="wrapper">
          {banner}\n""".format(styles='\n'.join(style_tags),
                               banner=banner_tag)

    foot = """{}\n</div>\n</body>\n</html>""".format(poster_tag)

    result = fix_tbl(fix_img(fix_li(head + html + foot)))
    return result
Example #9
    def __init__(self, user, game):
        self.lock = Lock()
        self.game = game
        self.user = user

        try:
            path = config['gameprofile_db']
        except KeyError:
            LOG.error('gameprofile_db path config variable not set')
            return

        # Create gameprofile folder and user folder on the game path
        path = join_path(path, game.slug)
        if not create_dir(path):
            error_msg = 'User GameProfile path \"%s\" could not be created.' % path
            LOG.error(error_msg)
            raise GameProfileError(error_msg)
        self.path = get_absolute_path(path)

        self.defaults = {}
        default_yaml_path = unicode(get_absolute_path(join_path(game.path, 'defaultgameprofiles.yaml')))
        if path_exists(default_yaml_path):
            with open(default_yaml_path, 'r') as f:
                try:
                    file_defaults = yaml.load(f)
                    self.defaults = dict((v['user'], v['value']) for v in file_defaults['profiles'])
                except (yaml.YAMLError, KeyError, TypeError) as e:
                    LOG.error('Failed loading default game profiles: %s', str(e))
Example #10
def import_config(file=join_path(ROOT, 'config.json')):
    with open(file, encoding='utf-8') as json_file:
        text = json_file.read()
        json_text = re.search(r'\{[\s\S]*\}', text).group()  # strip comments from the JSON file
    config = json.loads(json_text)

    non_style_keys = ['poster_url', 'banner_url',
                      'convert_list', 'ul_style',
                      'auto_archive', 'auto_rename']

    # read variables from the config file; at most two levels of nesting are supported
    cfg_lines = []
    for key, value in config.items():
        if key not in non_style_keys:
            if not isinstance(value, dict):
                cfg_lines.append('@{}: {};\n'.format(key, value))
            else:
                for inner_key, inner_value in value.items():
                    cfg_lines.append('@{}: {};\n'.format(
                        inner_key + '_' + key, inner_value))

    variables = '\n'.join(cfg_lines) + '\n\n'

    with open(join_path(ROOT, 'less', 'styles.less'), encoding='utf-8') as styles_file:
        styles = styles_file.read()
    with open(join_path(ROOT, 'less', 'default.less'), 'w', encoding='utf-8') as default_less:
        default_less.write(variables + styles)
    return config
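Examples #7 and #10 appear to form a pipeline: import_config() rewrites less/default.less from config.json, and compile_styles() then compiles it to CSS. A hypothetical end-to-end invocation:

config = import_config()  # writes @variables + styles.less into less/default.less
compile_styles()          # compiles the regenerated Less into css/default.css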
Example #11
 def handle_noargs(self, **options):
     """
     Extract data and save into the database.
     """
     routes_dir = '/home/ragsagar/Downloads/PMPML_TimeTable/route/en/'
     route_files = [join_path(routes_dir, file_) for file_ in
                    listdir(routes_dir) if isfile(join_path(routes_dir, file_))]
     pickle_file = \
         open('/home/ragsagar/projects/myworks/FindMyBus/FindMyBus_dj/dbdump/stop.pkl')
     stop_dict = pickle.load(pickle_file)
     for route_file in route_files:
         soup = BeautifulSoup(open(route_file).read())
         route_stop_names = self.get_stops(soup)
         route_stops = []
         for stop_name in route_stop_names:
             stop, created = Stop.objects.get_or_create(name=stop_name)
             if created:
                 print "New stop ", stop.name
                 stop.latitude = stop_dict[stop_name]['latitude']
                 stop.longitude = stop_dict[stop_name]['longitude']
                 stop.save()
             route_stops.append(stop)
         route_num = self.get_route_number(soup)
         route = Route.objects.create(number=route_num,
                              name=self.get_route_name(soup),
                              from_stop=Stop.objects.get(name=route_stop_names[0]),
                              to_stop=Stop.objects.get(name=route_stop_names[-1]))
         route.stops = route_stops
         route.save()
         print route.name, route.number
Example #12
def start():
    #connect to db
    global conn, sock, logfile
    logfile = open(join_path(scriptDirectory, 'log.log'), 'a')
    printAll('Connecting To Pics DB')
    conn = sqlite3.connect(join_path(scriptDirectory, 'desktopPics.db'))
    c = conn.cursor()
    c.execute('create table if not exists data (name text, url text primary key, liked integer default 0, priority integer default 0, ignore integer default 0)')
    conn.commit()
    serversocket = initSocket()

    dir_path = join_path(scriptDirectory, images_directory)
    if not os.path.exists(dir_path):
        printAll("Created Image Directory: ", dir_path)
        os.makedirs(dir_path)
    createCronJobs()

    #start socket

    while True:
        # Wait for a connection
        #TODO: when get info from client, handle it and close connection, wait to accept another
        sock, client_address = serversocket.accept()
        sock.settimeout(None)
        try:
            # Receive the data in small chunks and retransmit it
            data = sock.recv(1024) #YUNO BLOCK!!!!
            log('received "%s"' % data)
            if data == "":
                continue
            handle(data)
        finally:
            # Clean up the connection
            sock.close()
Example #13
def move_old_package(struct, opts):
    """Move an old package that may have been created without a namespace

    Args:
        struct (dict): directory structure as dictionary of dictionaries
        opts (dict): options of the project

    Returns:
        tuple(dict, dict):
            directory structure as dictionary of dictionaries and input options
    """
    old_path = join_path(opts['project'], 'src', opts['package'])
    namespace_path = opts['qual_pkg'].replace('.', os.sep)
    target = join_path(opts['project'], 'src', namespace_path)

    old_exists = opts['pretend'] or isdir(old_path)
    #  ^  When pretending, pretend also an old folder exists
    #     to show a worst case scenario log to the user...

    if old_exists and opts['qual_pkg'] != opts['package']:
        if not opts['pretend']:
            logger.warning(
                '\nA folder %r exists in the project directory, and it is '
                'likely to have been generated by a PyScaffold extension or '
                'manually by one of the current project authors.\n'
                'Moving it to %r, since a namespace option was passed.\n'
                'Please make sure to edit all the files that depend on this '
                'package to ensure the correct location.\n',
                opts['package'], namespace_path)

        utils.move(old_path, target=target,
                   log=True, pretend=opts['pretend'])

    return struct, opts
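A sketch of a call with hypothetical option values, assuming the opts keys read in the function body; here src/my_pkg would be moved to src/ns/my_pkg:

opts = {'project': 'my_proj', 'package': 'my_pkg',
        'qual_pkg': 'ns.my_pkg', 'pretend': False}
struct, opts = move_old_package({}, opts)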
Example #14
def quant_jobs(project_path, sample, mergejob, ref, defaults, ppn='8', walltime='12:00:00'):
    logger.info('Using cuffquant options: {}'.format(defaults['cuffquant_options']))
    jobstr = []
    jobstr += [job_header.replace('JOBNAME', '_'.join([sample] + ['cuffquant']))
                   .replace('WALLTIME', walltime)
                   .replace('PROJECT', defaults['project'])
                   .replace('DEPEND', ('afterok:{}'.format(mergejob) if mergejob != '' else ''))
                   .replace('JOB_OUTPUTS', abspath(join_path(project_path, 'job_outputs')))
                   .replace('EMAILADDRESS', defaults['email'])]

    # make this job depend on successful completion of previous jobs: merge_job
    jobstr += ['#PBS -W depend=afterok:{}'.format(mergejob)]

    jobstr += ['''# Load modules needed by myapplication.x
module load ngs tools cufflinks/2.2.1 tophat/2.1.1 bowtie2/2.2.5''']

    if ref.get('bowtie_indexes'):
        jobstr += ['export BOWTIE_INDEXES={}'.format(ref['bowtie_indexes'])]

    jobstr += ['cuffquant {} -p PPN {} -o {} {} {} {} '.format(defaults['cuffquant_options'],
                                                               ('-M ' + ref['gff_mask']) if ref.get('gff_mask') else '',
                                                               (abspath(join_path(project_path, sample))),
                                                               ('-b ' + ref['fasta_genome']) if ref.get(
                                                                   'fasta_genome') else '',
                                                               (abspath(
                                                                   join_path(project_path, 'cmerge', 'merged_asm',
                                                                                'merged.gtf'))),
                                                               (abspath(join_path(project_path, sample,
                                                                                             'accepted_hits.sorted.bam'))))]

    return '\n\n'.join(jobstr).replace('PPN', ppn)
Example #15
File: redpill.py  Project: enyst/plexnet
def install_setuptools():
    """Install setuptools if it's not already installed."""

    try:
        import pkg_resources
    except ImportError:
        SETUPTOOLS = 'setuptools-0.6c11'
        EGG_PATH = '%s-py2.6.egg' % SETUPTOOLS
        SETUPTOOLS_DIR = join_path(THIRD_PARTY, 'distfiles', 'setuptools')
        print_message("Installing %s" % EGG_PATH, ACTION)
        os.chdir(SETUPTOOLS_DIR)
        download_distfile(
            join_path(SETUPTOOLS_DIR, EGG_PATH), DISTFILES_SERVER_BASE+EGG_PATH
            )
        proc = Popen(
            'sh %s --script-dir=%s -O2' % (EGG_PATH, PLEXNET_BIN),
            shell=True
            )
        status = proc.wait()
        if status:
            print_message("Error Installing %s" % SETUPTOOLS, ERROR)
            sys.exit(1)
        print_message("Successfully Installed %s" % SETUPTOOLS, SUCCESS)
        for path in sys.path:
            if isdir(path) and EGG_PATH in listdir(path):
                sys.path.append(join_path(path, EGG_PATH))
Example #16
def create_django_proj(struct, opts):
    """Creates a standard Django project with django-admin.py

    Args:
        struct (dict): project representation as (possibly) nested
            :obj:`dict`.
        opts (dict): given options, see :obj:`create_project` for
            an extensive list.

    Returns:
        struct, opts: updated project representation and options

    Raises:
        :obj:`RuntimeError`: raised if django-admin.py is not installed
    """
    if opts.get('update'):
        helpers.logger.warning(UpdateNotSupported(extension='django'))
        return struct, opts

    try:
        shell.django_admin('--version')
    except Exception as e:
        raise DjangoAdminNotInstalled from e

    pretend = opts.get('pretend')
    shell.django_admin('startproject', opts['project'],
                       log=True, pretend=pretend)
    if not pretend:
        src_dir = join_path(opts['project'], 'src')
        os.mkdir(src_dir)
        shutil.move(join_path(opts['project'], opts['project']),
                    join_path(src_dir, opts['package']))

    return struct, opts
Example #17
 def build_centos_repos(cls, releases_paths):
     for repo_path in releases_paths:
         repo_packages = join_path(repo_path, 'Packages')
         utils.create_dir(repo_packages)
         utils.move_files_in_dir(
             join_path(repo_path, '*.rpm'),
             repo_packages)
         utils.exec_cmd('createrepo -o {0} {0}'.format(repo_path))
Example #18
File: safe.py  Project: edaniszewski/safe
def get_safe_file_paths(safe_name):
    """ Get the path for the open and closed Safe files.

    :param safe_name: Name of the Safe.
    :return: A tuple which contains the path of the open safe file and the path
        of the closed safe file.
    """
    return join_path(SAFES_PATH, safe_name + '.open'), join_path(SAFES_PATH, safe_name)
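A sketch of the returned pair for a hypothetical Safe named 'personal':

open_path, closed_path = get_safe_file_paths('personal')
# -> (<SAFES_PATH>/personal.open, <SAFES_PATH>/personal)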
Example #19
		def windows_createCronJobs():
			if not os.path.exists(join_path(scriptDirectory, 'client.bat')):
				print "Generating client.bat, please schedule a daily task for this batch file"
				with open('client.bat', 'w') as f:
					f.write("python "+join_path(scriptDirectory, 'client.py') +" dailyUpdate")
			if not os.path.exists(join_path(scriptDirectory, 'daemon.bat')):
				print "Generating daemon.bat, please schedule a task to run on boot for this batch file"
				with open('daemon.bat', 'w') as f:
					f.write("start /B python "+join_path(scriptDirectory, 'client.py') +" dailyUpdate")
Example #20
def git_tree_add(struct, prefix=""):
    for name, content in struct.items():
        if isinstance(content, string_types):
            git("add", join_path(prefix, name))
        elif isinstance(content, dict):
            git_tree_add(struct[name], prefix=join_path(prefix, name))
        else:
            raise RuntimeError("Don't know what to do with content type "
                               "{type}.".format(type=type(content)))
Example #21
 def __init__(self, *args, **kwargs):
     super(ValidatorV3, self).__init__(*args, **kwargs)
     self.deployment_tasks_path = join_path(
         self.plugin_path, 'deployment_tasks.yaml')
     self.network_roles_path = join_path(
         self.plugin_path, 'network_roles.yaml')
     self.node_roles_path = join_path(
         self.plugin_path, 'node_roles.yaml')
     self.volumes_path = join_path(
         self.plugin_path, 'volumes.yaml')
Example #22
File: conf.py  Project: ikn/boom
def find_music (d):
    # find files in the given directory and group by subdirectory (nested)
    music = {}
    base = len(join_path(d, ''))
    for subdir, dirs, files in os.walk(d, followlinks=True):
        music[subdir[base:]] = [join_path(subdir, fn) for fn in files]
        # top-level only
        if subdir != d:
            del dirs[:]
    return music
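A hypothetical call: walking a 'music' directory yields one key per immediate subdirectory, plus '' for files at the top level:

music = find_music('music')
# e.g. {'': ['music/intro.ogg'], 'menu': ['music/menu/theme.ogg']}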
Example #23
def create_challenge(ctf_name, challenge_name):
    """Prepare the storage for a CTF challenge."""
    challenge_dir = join_path(ROOT_DIR, ctf_name, challenge_name)
    shared_link   = rel_path(SHARED_DIR, challenge_dir)

    print('Preparing {storage} for challenge {challenge} of CTF {ctf}'.format(
        storage=challenge_dir, challenge=challenge_name, ctf=ctf_name))

    os.makedirs(challenge_dir)
    os.symlink(shared_link, join_path(challenge_dir, 'shared'))
Example #24
def init_category(category, pwd=join_path('.', 'data', 'knowledge')):
    '''
    Initializes the association between concepts and entities of a
    given lexical category.
    '''

    table[category] = [
        entity.from_line(category, elem)
        for elem in run_file(join_path(pwd, category+'.csv'))
    ]
Example #25
 def createCronJobs(self):
   # if not os.path.exists(join_path(scriptPath, 'client.bat')):
   #   log.info("Generating client.bat, please schedule a daily task for this batch file")
   #   with open('client.bat', 'w') as f:
   #     f.write("python "+join_path(scriptPath, 'client.py') +" dailyUpdate")
   batch_path = join_path(self.windows_startup_dir, 'daemon.bat')
   if not os.path.exists(batch_path):
     log.info("Generating daemon.bat")
     with open(batch_path, 'w') as f:
       f.write("start " + sys.executable + " " + join_path(scriptPath, 'daemon.py'))
Example #26
File: tests.py  Project: VanL/zerovm-cli
 def test_image_extract(self):
     img1 = self._create_tar({'file1': BytesIO(b'a'),
                              'file2': BytesIO(b'b')})
     img1_dev = '/dev/1.%s' % os.path.basename(img1)
     img2 = self._create_tar({'file3': BytesIO(b'a'),
                              'file4': BytesIO(b'b')})
     img2_dev = '/dev/2.%s' % os.path.basename(img2)
     opts = '--zvm-image=%s --zvm-image=%s,/lib' % (img1, img2)
     self.program = 'file2'
     self.argv = [ZVSH]
     self.argv.extend(opts.split())
     self.argv.append(self.program)
     cmd_line = self.program
     shell = Shell(self.argv)
     try:
         with pytest.raises(SystemExit):
             shell.run()
         files = os.listdir(shell.zvsh.tmpdir)
         self.assertTrue('manifest.1' in files)
         self.assertTrue('nvram.1' in files)
         manifest = _read_manifest(join_path(shell.zvsh.tmpdir,
                                             'manifest.1'))
         boot = os.path.join(os.path.abspath(shell.zvsh.tmpdir), 'boot.1')
         reference = self._reference_manifest(shell.zvsh.tmpdir,
                                              executable=boot)
         channels = reference['channel']
         img1_chan = [os.path.abspath(img1),
                      img1_dev,
                      '3',
                      '0',
                      str(DEFAULT_LIMITS['reads']),
                      str(DEFAULT_LIMITS['rbytes']),
                      str(DEFAULT_LIMITS['writes']),
                      str(DEFAULT_LIMITS['wbytes'])]
         img2_chan = [os.path.abspath(img2),
                      img2_dev,
                      '3',
                      '0',
                      str(DEFAULT_LIMITS['reads']),
                      str(DEFAULT_LIMITS['rbytes']),
                      str(DEFAULT_LIMITS['writes']),
                      str(DEFAULT_LIMITS['wbytes'])]
         # insert in reverse order
         channels.insert(3, img2_chan)
         channels.insert(3, img1_chan)
         self.assertEqual(manifest, reference)
         nvram = _read_nvram(join_path(shell.zvsh.tmpdir,
                                       'nvram.1'))
         reference = _reference_nvram(
             cmd_line,
             images=[(img1_dev, '/', 'ro', 'no'),
                     (img2_dev, '/lib', 'ro', 'no')])
         self.assertEqual(nvram, reference)
     finally:
         shell.zvsh.orig_cleanup()
Example #27
File: tests.py  Project: VanL/zerovm-cli
 def test_input_output_file(self):
     in_fd, in_name = mkstemp(dir=self.testdir)
     os.write(in_fd, b'test')
     os.close(in_fd)
     out_fd, out_name = mkstemp(dir=self.testdir)
     os.close(out_fd)
     os.unlink(out_name)
     self.assertFalse(os.path.exists(out_name))
     opts = '-i @%s -o @%s' % (in_name, out_name)
     in_dev_name = '/dev/1.%s' % os.path.basename(in_name)
     out_dev_name = '/dev/2.%s' % os.path.basename(out_name)
     cmd_line = ' '.join([self.program, '-i %s -o %s'
                                        % (in_dev_name, out_dev_name)])
     self.argv.extend(opts.split())
     shell = Shell(self.argv)
     try:
         with pytest.raises(SystemExit):
             shell.run()
         files = os.listdir(shell.zvsh.tmpdir)
         self.assertTrue('manifest.1' in files)
         self.assertTrue('nvram.1' in files)
         manifest = _read_manifest(join_path(shell.zvsh.tmpdir,
                                             'manifest.1'))
         reference = self._reference_manifest(shell.zvsh.tmpdir)
         channels = reference['channel']
         file_chan = [out_name,
                      out_dev_name,
                      '3',
                      '0',
                      str(DEFAULT_LIMITS['reads']),
                      str(DEFAULT_LIMITS['rbytes']),
                      str(DEFAULT_LIMITS['writes']),
                      str(DEFAULT_LIMITS['wbytes'])]
         channels.insert(3, file_chan)
         file_chan = [in_name,
                      in_dev_name,
                      '3',
                      '0',
                      str(DEFAULT_LIMITS['reads']),
                      str(DEFAULT_LIMITS['rbytes']),
                      str(DEFAULT_LIMITS['writes']),
                      str(DEFAULT_LIMITS['wbytes'])]
         channels.insert(3, file_chan)
         self.assertEqual(manifest, reference)
         nvram = _read_nvram(join_path(shell.zvsh.tmpdir,
                                       'nvram.1'))
         reference = _reference_nvram(cmd_line,
                                      [{'channel': in_dev_name,
                                        'mode': 'file'},
                                       {'channel': out_dev_name,
                                        'mode': 'file'}])
         self.assertEqual(nvram, reference)
         self.assertTrue(os.path.exists(out_name))
     finally:
         shell.zvsh.orig_cleanup()
Example #28
def init(rule_dict, pwd=join_path('.', 'data', 'rules')):
    '''Initialize rules from file.'''

    for elem in run_file(join_path(pwd, 'rules_list.csv')):
        rule_dict[elem] = Rule(elem)

    rules_list = ['nominal', 'verbal']
    for rule_name in rules_list:
        for elem in run_file(join_path(pwd, rule_name+'.csv')):
            name, par, subs = elem.split(',')
            rule_dict[name].insert(par, subs)
Example #29
def check_fastqc(groups, output_path):
    """Checks if fastqc files exists and are not empty."""
    for readf in [readf for group in groups.values() for readfs in group.values() for readf in readfs]:
        qcf = readf.split('.')[0].split('/')[-1]
        if (not os.path.isfile(join_path(output_path, qcf + '_fastqc.zip'))) or \
                (not os.path.isfile(join_path(output_path, qcf + '_fastqc.html'))):
            return False
        elif (os.path.getsize(join_path(output_path, qcf + '_fastqc.zip')) == 0) or \
                (os.path.getsize(join_path(output_path, qcf + '_fastqc.html')) == 0):
            return False
    return True
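A minimal sketch of the groups layout implied by the nested comprehension, i.e. {group: {sample: [read files]}} (hypothetical names):

groups = {'treated': {'s1': ['reads/s1_R1.fastq.gz']}}
if not check_fastqc(groups, 'qc_output'):
    print('FastQC outputs are missing or empty')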
Example #30
 def configure_responses(self):
     httpretty.register_uri(
         httpretty.GET,
         join_path(self.node_settings.api_url, 'articles'),
         body=dumps(self.figshare.articles.return_value)
     )
     httpretty.register_uri(
         httpretty.GET,
         join_path(self.node_settings.api_url, 'articles', '902210'),
         body=dumps(self.figshare.article.return_value)
     )
Example #31
def main():
    """Main app function."""
    # Create the app
    app = QApplication(sys.argv)

    # Setup app translator
    app_translator = QTranslator()

    if exists(join_path(BASE_DIR, VM_PATHS.i18n)):
        app_translator.load(
            join_path(BASE_DIR, VM_PATHS.i18n,
                      'videomorph_{0}'.format(LOCALE)))
    else:
        app_translator.load(
            join_path(SYS_PATHS.i18n, 'videomorph_{0}'.format(LOCALE)))

    app.installTranslator(app_translator)
    qt_translator = QTranslator()
    qt_translator.load("qt_" + LOCALE,
                       QLibraryInfo.location(QLibraryInfo.TranslationsPath))
    app.installTranslator(qt_translator)

    # Run the app
    run_app(app=app)
Example #32
def load_dataset(text_field, label_field, args, **kwargs):
    train_dataset, dev_dataset, test_dataset = get_dataset(
        join_path(dirname(path_root), 'data'), text_field, label_field)
    if args.static and args.pretrained_name and args.pretrained_path:
        vectors = load_word_vectors(args.pretrained_name, args.pretrained_path)
        text_field.build_vocab(train_dataset, dev_dataset, vectors=vectors)
    else:
        text_field.build_vocab(train_dataset, dev_dataset)
    label_field.build_vocab(train_dataset, dev_dataset)
    train_iter, dev_iter = data.Iterator.splits(
        (train_dataset, dev_dataset),
        batch_sizes=(args.batch_size, args.batch_size),
        sort_key=lambda x: len(x.text),
        **kwargs)
    return train_iter, dev_iter
Example #33
def find_xml_obs(obs, frequency, variable):
    file_name = join_path(
        xmldir, 'obs_' + str(obs) + '_glob_' + str(frequency) + '_O.xml')
    xml = CDMS2open(file_name)
    listvar1 = sorted(xml.listvariables())
    if variable not in listvar1:
        print '\033[95m' + str().ljust(5) + "obs var " + str(
            variable) + " cannot be found" + '\033[0m'
        print '\033[95m' + str().ljust(10) + "file_name = " + str(
            file_name) + '\033[0m'
        print '\033[95m' + str().ljust(10) + "variables = " + str(
            listvar1) + '\033[0m'
        exit(1)
    file_area, file_land = find_xml_fx(obs)
    return file_name, file_area, file_land
Example #34
    def __init__(self, fs=8000, fl=100, fh=4000, frame_len_ms=25, n_jobs=20, n_ceps=20, save_loc='../save'):
        mfcc_loc = join_path(save_loc, MFCC_DIR)
        params_file = join_path(mfcc_loc, 'mfcc.params')
        config_file = join_path(mfcc_loc, 'mfcc.conf')

        with open(params_file, 'w') as f:
            f.write('nj={}\n'.format(n_jobs))
            f.write('compress={}\n'.format('true'))
            f.write('mfcc_loc={}\n'.format(mfcc_loc))
            f.write('mfcc_config={}\n'.format(config_file))

        with open(config_file, 'w') as f:
            f.write('--sample-frequency={}\n'.format(fs))
            f.write('--low-freq={}\n'.format(fl))
            f.write('--high-freq={}\n'.format(fh))
            f.write('--frame-length={}\n'.format(frame_len_ms))
            f.write('--num-ceps={}\n'.format(n_ceps))
            f.write('--snip-edges={}\n'.format('false'))

        self.mfcc_loc = mfcc_loc
        self.save_loc = save_loc
        self.params_file = params_file
        self.n_ceps = n_ceps
        self.n_jobs = n_jobs
Example #35
def makeGeosparqlFusekiProcess(modelPath: AnyStr, dataPath: AnyStr):
    geosparqlFusekiProcess = Popen(
        split(f'java -jar target/ServiceGeoSparqlObjetTerritoire-1.0-SNAPSHOT-jar-with-dependencies.jar {modelPath} {dataPath}'),
        cwd=join_path(dirname(dirname(dirname(abspath(__file__)))), 'ServiceGeoSparqlObjetTerritoire'),
        stderr=PIPE,
        stdout=PIPE,
    )

    while True:
        stdout = geosparqlFusekiProcess.stdout.readline()
        if b'Start Fuseki' in stdout:
            print(stdout.decode())
            break

    return geosparqlFusekiProcess
Example #36
def move_old_package(struct, opts):
    """Move an old package that may have been created without a namespace

    Args:
        struct (dict): directory structure as dictionary of dictionaries
        opts (dict): options of the project

    Returns:
        tuple(dict, dict):
            directory structure as dictionary of dictionaries and input options
    """

    old_path = join_path(opts['project'], opts['package'])
    namespace_path = opts['namespace_pkg'].replace('.', os.sep)
    target = join_path(opts['project'], namespace_path)

    old_exists = opts['pretend'] or isdir(old_path)
    #  ^  When pretending, pretend also an old folder exists
    #     to show a worst case scenario log to the user...

    if old_exists and opts['namespace_pkg'] != opts['package']:

        if not opts['pretend']:
            logger.warning(
                '\nA folder %r exists in the project directory, and it is '
                'likely to have been generated by a PyScaffold extension or '
                'manually by one of the current project authors.\n'
                'Moving it to %r, since a namespace option was passed.\n'
                'Please make sure to edit all the files that depend on this '
                'package to ensure the correct location.\n',
                opts['package'], namespace_path)

        utils.move(old_path, target=target,
                   log=True, pretend=opts['pretend'])

    return (struct, opts)
Example #37
def plot_embeddings(embeddings):
    # the first assignment (graph_labels_last_version.txt) was dead code,
    # immediately overwritten by the v1.1 label file below
    data_path = join_path(dirname(dirname(__file__)),
                          'data/bello_kg/graph_labels_v1.1.txt')
    X, Y = read_node_label(data_path)

    emb_list = []
    for k in X:
        emb_list.append(embeddings[k])
    emb_list = np.array(emb_list)

    model = TSNE(n_components=2)
    node_pos = model.fit_transform(emb_list)

    color_idx = {}
    for i in range(len(X)):
        color_idx.setdefault(Y[i][0], [])
        color_idx[Y[i][0]].append(i)

    for c, idx in color_idx.items():
        plt.scatter(node_pos[idx, 0], node_pos[idx, 1], label=c)
    plt.legend()
    plt.show()
Example #38
def apply_update_rules(rules, struct, prefix=None):
    """Apply update rules using :obj:`~.FileOp` to a directory structure

    Args:
        rules (dict): directory structure as dictionary of dictionaries with
                      :obj:`~.FileOp` keys. The structure will be modified.
        struct (dict): directory structure as dictionary of dictionaries
        prefix (str): prefix path for the structure

    Returns:
        dict: directory structure with keys removed according to the rules
    """
    if prefix is None:
        prefix = os.getcwd()
    for k, v in rules.items():
        if isinstance(v, dict):
            apply_update_rules(v, struct[k], join_path(prefix, k))
        else:
            path = join_path(prefix, k)
            if v == FileOp.NO_OVERWRITE and os.path.exists(path):
                struct.pop(k, None)
            elif v == FileOp.NO_CREATE:
                struct.pop(k, None)
    return struct
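A minimal sketch, assuming FileOp comes from the same module: with NO_OVERWRITE an entry is dropped only when the file already exists on disk, while NO_CREATE drops it unconditionally:

rules = {'README.rst': FileOp.NO_OVERWRITE}
struct = {'README.rst': 'stub\n'}
struct = apply_update_rules(rules, struct)  # README.rst removed if it exists under cwd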
Example #39
    async def _refresh(self):
        """
        Update files structure
        :return:
        """
        new_files = walk(self._directory, self._url_prefix)
        old_files = self._files
        self._files = new_files
        with ProcessPoolExecutor() as executor:
            for product, software in new_files.items():
                archive_directory = join_path(self._directory, product)
                archive_path = join_path(archive_directory, self._archive_name(product))

                if (
                    not old_files
                    or not old_files.get(product)
                    or not isfile(archive_path)
                    or not compare_latest_software(
                        old_files[product], new_files[product]
                    )
                ):
                    logging.debug("Update archive " + product)
                    await self._loop.run_in_executor(executor, archive, software, archive_path)

                    # Remove other old archives
                    for file in listdir(archive_directory):
                        if file.endswith(".zip"):  # archive
                            if basename(archive_path) != file:  # old archive
                                try:
                                    remove(join_path(archive_directory, file))
                                except OSError as err:
                                    logging.error(f"Unable to remove {file} {err}")

                    self._archives[product] = ArchiveInfo.fromfile(
                        archive_path, self._url_prefix
                    )
Example #40
def create_django_proj(struct, opts):
    """Creates a standard Django project with django-admin.py

    Args:
        struct (dict): project representation as (possibly) nested
            :obj:`dict`.
        opts (dict): given options, see :obj:`create_project` for
            an extensive list.

    Returns:
        struct, opts: updated project representation and options

    Raises:
        :obj:`RuntimeError`: raised if django-admin.py is not installed
    """
    if opts.get("update"):
        helpers.logger.warning(UpdateNotSupported(extension="django"))
        return struct, opts

    try:
        django_admin("--version")
    except Exception as e:
        raise DjangoAdminNotInstalled from e

    pretend = opts.get("pretend")
    django_admin("startproject", opts["project"], log=True, pretend=pretend)
    if not pretend:
        src_dir = join_path(opts["project"], "src")

        os.mkdir(src_dir)
        shutil.move(
            join_path(opts["project"], opts["project"]),
            join_path(src_dir, opts["package"]),
        )

    return struct, opts
Example #41
 def test_existing_file(self):
     fd, name = mkstemp(dir=self.testdir)
     os.write(fd, b'test')
     os.close(fd)
     opts = '-f @%s' % name
     dev_name = '/dev/1.%s' % os.path.basename(name)
     cmd_line = ' '.join([self.program, '-f %s' % dev_name])
     self.argv.extend(opts.split())
     shell = Shell(self.argv)
     try:
         with pytest.raises(SystemExit):
             shell.run()
         files = os.listdir(shell.zvsh.tmpdir)
         self.assertTrue('manifest.1' in files)
         self.assertTrue('nvram.1' in files)
         manifest = _read_manifest(
             join_path(shell.zvsh.tmpdir, 'manifest.1'))
         reference = self._reference_manifest(shell.zvsh.tmpdir)
         channels = reference['channel']
         file_chan = [
             name, dev_name, '3', '0',
             str(DEFAULT_LIMITS['reads']),
             str(DEFAULT_LIMITS['rbytes']),
             str(DEFAULT_LIMITS['writes']),
             str(DEFAULT_LIMITS['wbytes'])
         ]
         channels.insert(3, file_chan)
         self.assertEqual(manifest, reference)
         nvram = _read_nvram(join_path(shell.zvsh.tmpdir, 'nvram.1'))
         reference = _reference_nvram(cmd_line, [{
             'channel': dev_name,
             'mode': 'file'
         }])
         self.assertEqual(nvram, reference)
     finally:
         shell.zvsh.orig_cleanup()
Example #42
def main():
    """Produces a features file for models to be trained on.

    For command line help, run with the '-h' flag.

    Writes:
        An output CSV file with the targets, labels and feature representations.
    """
    args = get_args()
    dataset, chrom, _ = common.process_args(args)
    if not dataset: return

    out_file = join_path(
        args.path, consts.DATA_DIR, OUT_FILE % (dataset.name, chrom.num))
    fd_out = open(out_file, 'w')
    handlers = get_column_handlers()

    out_dir = dataset.get_out_path()
    in_file = join_path(out_dir, consts.AGG_OUT_NAME % chrom.num)
    def label_getter(target):
        return dataset.get_value(args.chr, target, dataset.genome_label_idx)
    parse_features(in_file, handlers, fd_out, label_getter)

    fd_out.close()
Example #43
def mainWebserver():
    '''
    Starts the main webserver on the declared port.
    '''

    app = TornadoApplication([(r'/', MainHandler),
                              (r'/(.*)', StaticFileHandler, {
                                  'path': join_path(dirname(__file__),
                                                    'static')
                              })])

    app.listen(8889)  # PORT

    # IOLoop.start() blocks, so pass the bound method itself as the thread target
    t1 = Thread(target=tornado.ioloop.IOLoop.current().start)  # threading library
    t1.start()
Example #44
 def __init__(self,
              timezone="UTC",
              strip=True,
              db_location=join_path(_determine_home_dir(), "db.json")):
     """
     :param str timezone: An ISO compliant timezone string. Defaults to UTC
     :param bool strip: Should strip strings from whitespaces before writing.
     :param str db_location: DB json path. Defaults to home directory with file name "db.json"
     """
     self.__db_location = db_location
     self.timezone = timezone
     self.should_strip = strip
     self.__db = TinyDB(db_location)
     self.__location = db_location
     self._logger = logging.getLogger("Crawler")
     self._logger.debug("Opened tinydb at %s" % db_location)
Example #45
def save_video(video_file, title):
    ext = secure_filename(video_file.filename).split('.')[-1]
    video_hash = md5(video_file.read()).hexdigest()
    video_file.seek(0)

    video = Video(title)
    video.save(video_hash, cur_user())
    videos.save(video_file, folder=str(video.id), name='video.' + ext)
    video.add_path(join_path(app.config['VIDEO_SAVE_PATH'], video.id))

    try:
        prepare_video(video.id, ext)
    except OSError:
        video.delete_video()
        return None
    return video
Example #46
    def _convert_and_write_labels(self, labels_folder_path, dataset_part,
                                  image_info, category_number, category_id):
        image_size = image_info['height'], image_info['width']
        image_annotations = self._image_annotation_getter.get_image_annotations(
            dataset_part=dataset_part,
            image_id=image_info['id'],
            category_id=category_id,
            iscrowd=None)
        label_file_name = f"{image_info['file_name'].split('.')[0]}.txt"
        label_file_path = join_path(labels_folder_path, label_file_name)
        self._clean_file_content(label_file_path)

        for object_annotation in image_annotations:
            coco_bbox = object_annotation['bbox']
            self._write_label_to_txt_file(label_file_path, category_number,
                                          image_size, coco_bbox)
Example #47
    def test_chat(self):
        # go through all chats in test chats
        for ch in folder_traversal.traverse_folder(join_path(ROOT_DIR, "test", "messages")):
            chat = chat_decoder.add_all_data(ch)

            # check that when we ask for the responses we get them in order of appearance (and we get all of them)
            msg_at = 0
            chat_messages = chat.messages

            # we go through the response messages while simultaneously checking chat messages
            for response in chat.get_responses():
                for msg in response.messages:
                    if msg == chat_messages[msg_at]:
                        msg_at += 1
                    else:
                        self.fail("The messages we get from get_response is not the same as the messages in chat")
Example #48
    def move_after_download(self):
        """
        Moves the downloaded file to its destined filepath.
        """
        if getcwd() == self.filepath:
            return None

        try:
            original_path = join_path(getcwd(), self.video_title + self.format)

            # * moving the file to self.filepath
            move_file(original_path, self.filepath)
            print("File moved to", self.filepath)

        except Exception as e:
            print(e)
Example #49
    def get_keys(self):
        key_files = os.listdir(self.path)
        list_array = []

        for key_file in key_files:
            try:
                f = open(unicode(join_path(self.path, key_file)), 'r')
                try:
                    (key, ext) = os.path.splitext(key_file)
                    if ext == '.txt':
                        list_array.append(key)
                finally:
                    f.close()  # close the handle even for non-.txt files
            except IOError, e:
                LOG.error('Failed listing userdata: %s', str(e))
                raise UserDataError
Example #50
 def get(self, usernames):
     path = self.path
     game_profiles = {}
     with self.lock:
         for username in usernames:
             profile_path = join_path(path, username + '.txt')
             try:
                 with open(unicode(profile_path), 'r') as fin:
                     value = fin.read()
             except IOError:
                 if username in self.defaults:
                     value = self.defaults[username]
                 else:
                     continue
             game_profiles[username] = {'value': value}
     return {'profiles': game_profiles}
Example #51
def beam_complexity(cnx, study_instance_uid):
    """
    :param cnx: connection to DVHA SQL database
    :type cnx: DVH_SQL
    :param study_instance_uid: study_instance_uid in SQL database
    :type study_instance_uid: str
    """

    rt_plan_query = cnx.query('DICOM_Files', 'folder_path, plan_file',
                              "study_instance_uid = '%s'" %
                              study_instance_uid)[0]
    rt_plan_file_path = join_path(rt_plan_query[0], rt_plan_query[1])

    rt_plan = dicom.read_file(rt_plan_file_path)

    for beam_num, beam in enumerate(rt_plan.BeamSequence):
        try:
            condition = "study_instance_uid = '%s' and beam_number = '%s'" % (
                study_instance_uid, (beam_num + 1))
            meterset = float(cnx.query('Beams', 'beam_mu', condition)[0][0])
            mlca_data = BeamAnalyzer(beam, meterset, ignore_zero_mu_cp=True)
            mlc_keys = ['area', 'x_perim', 'y_perim', 'cmp_score', 'cp_mu']
            summary_stats = {
                key: calc_stats(mlca_data.summary[key])
                for key in mlc_keys
            }

            column_vars = {
                'area': 'area',
                'x_perim': 'x_perim',
                'y_perim': 'y_perim',
                'complexity': 'cmp_score',
                'cp_mu': 'cp_mu'
            }
            stat_map = {'min': 5, 'mean': 3, 'median': 2, 'max': 0}

            for c in list(column_vars):
                for s in list(stat_map):
                    value = summary_stats[column_vars[c]][stat_map[s]]
                    column = "%s_%s" % (c, s)
                    cnx.update('Beams', column, value, condition)
            cnx.update('Beams', 'complexity',
                       np.sum(mlca_data.summary['cmp_score']), condition)
        except Exception as e:
            msg = 'db.update.beam_complexity: MLC Analyzer fail for beam number %s and uid %s' % \
                  ((beam_num+1), study_instance_uid)
            push_to_log(e, msg=msg)
Example #52
def get_files(excluded_paths, filter=lambda x: True):
    for name in listdir('.'):
        if name in excluded_paths:
            continue

        if isdir(name):
            stack = [name]
            while stack:
                base = stack.pop()
                for name in listdir(base):
                    path = join_path(base, name)
                    if isdir(path):
                        stack.append(path)
                    elif filter(name):
                        yield path
        elif filter(name):
            yield name
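A hypothetical invocation that walks the current directory, skips a 'build' folder, and yields only Python sources:

for path in get_files({'build'}, filter=lambda n: n.endswith('.py')):
    print(path)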
Example #53
    def run_epoch(epoch,
                  model,
                  loader,
                  optimizer=None,
                  scheduler=None,
                  mode='train'):
        is_train = mode == 'train'
        is_test = mode == 'test'
        model.eval()
        if is_train:
            model.train()

        iters = len(loader)
        preds, targets, losses, accs = None, None, None, None
        for i, (input, target) in enumerate(loader, start=1):
            input, target = input.to(device), target.to(device)
            if is_train:
                for param in model.parameters():
                    param.grad = None
            with torch.set_grad_enabled(is_train):
                pred, features = model(input)
                pred = pred.view(-1)
                target = target.view(-1)
                loss = criterion(pred, target)
                pred = torch.sigmoid(pred)
                y = pred.round()
                acc = (y == target).half()
                preds = pred.detach() if preds is None else torch.cat(
                    (preds, pred.detach()), dim=0)
                targets = target.detach() if targets is None else torch.cat(
                    (targets, target.detach()), dim=0)
                accs = acc.detach() if accs is None else torch.cat(
                    (accs, acc.detach()), dim=0)
                losses = loss.detach() if losses is None else torch.cat(
                    (losses, loss.detach()), dim=0)
                if is_train:
                    loss.mean().backward()
                    optimizer.step()
                    scheduler.step(epoch + i / iters)
        m = performance(preds.cpu().numpy(), targets.cpu().numpy())
        if is_test:
            filename = join_path(opts.checkpoints_dir, 'reliability_curve.png')
            evaluate_calibration(preds.cpu().numpy(),
                                 targets.cpu().numpy(), opts.model_name,
                                 filename)
        return losses.mean().item(), accs.mean().item(), m.f1score
Example #54
    def save_data_multi_lines(self,
                              data,
                              prefix="",
                              batch_size=50000,
                              build_dict=False):
        """
        In order to shuffle fully, there is no need to load all data if
        each file only contains one sample, it only needs to shuffle list
        of file name. But one file contains multi lines, each line is one
        sample. It needs to read all data into memory to shuffle fully.
        This interface is mainly for data containing multi lines in each
        file, which consumes more memory if there is a great amount of data.

        data: the Dataset object to process.
        prefix: the prefix of each batch.
        batch_size: number of data in each batch.
        build_dict: whether to build dictionary for data

        return: list of batch names
        """
        assert self.multi_lines
        label_list = []
        data_list = []

        # read all data
        label_list, data_list = self.get_data_list(0, len(data), data)
        if build_dict:
            self.create_dict(data_list)

        length = len(label_list)
        perm_list = np.array([i for i in xrange(length)])
        random.shuffle(perm_list)

        num_batches = int(math.ceil(length / float(batch_size)))
        batch_names = []
        for i in range(num_batches):
            batch_name = join_path(self.output_path,
                                   "%s_part_%03d" % (prefix, i))
            begin = i * batch_size
            end = min((i + 1) * batch_size, length)
            sub_label = [label_list[perm_list[i]] for i in range(begin, end)]
            sub_data = [data_list[perm_list[i]] for i in range(begin, end)]
            self.save_file(sub_label, sub_data, batch_name)
            batch_names.append(batch_name)

        return batch_names
Example #55
def install_package(name, packages_root=THIRD_PARTY_PACKAGES_ROOT):
    """Read the build file for the given package name."""

    package_name = name.lower()

    if package_name in PACKAGES:
        return

    build_file = join_path(packages_root, package_name, 'build.py')
    builtins = BUILTINS.copy()
    local = {}

    if not isfile(build_file):
        print_message("Couldn't find %s" % build_file, ERROR)

    execfile(build_file, builtins, local)

    if 'versions' not in local:
        print_message(
            "Couldn't find 'versions' variable in build.py for %s" % name,
            ERROR)

    versions = local['versions']
    latest = versions[-1]

    if 'packages' not in local:
        packages = {latest: {}}
    else:
        packages = local['packages']

    PACKAGES[package_name] = {
        'latest': latest,
        'packages': packages,
        'versions': versions,
        'type': local.get('type', 'default')
    }

    if 'deps' in packages:
        for dep in packages['deps']:
            install_package(dep)

    for version in versions:
        package = packages[version]
        if 'deps' in package:
            for dep in package['deps']:
                install_package(dep)
Example #56
 def run(self) -> TestCaseResult:
     self.__start_timestamp = dt.datetime.utcnow()
     self.__stage_start_timestamp = self.__start_timestamp
     current_locale = locale.getlocale()
     try:
         locale.setlocale(locale.LC_ALL, 'C')
         if self.__prepare_dependencies() is not None:
             return self.__result
         if self.__test_frontend() is not None:
             return self.__result
         if self.__test_backend() is not None:
             return self.__result
         if self.skip_native:
             return self.__result_success()
         if self.__test_with_native_compiler() is not None:
             return self.__result
         if self.compare_with_expected_output:
             if self.__test_native_compiler_link() is not None:
                 return self.__result
             if self.__read_symbols() is not None:
                 return self.__result
             if self.__has_main_symbol():  # Linked file is executable
                 # Execute the linked file
                 if self.__run_executable() is not None:
                     return self.__result
                 if self.__compare_output_files(
                         self.exec_result_out_file,
                         self.expected_output_in_file,
                         TestCaseStageID.EXPECTED_OUTPUT) is not None:
                     return self.__result
         if self.compare_with_reference:
             # Decompile file without line information, then compare to reference
             if self.__decompile() is not None:
                 return self.__result
             if self.__compare_output_files(
                     self.reference_in_file,
                     join_path(self.working_dir,
                               self.decompiled_src_out_file_basename),
                     TestCaseStageID.REFERENCE_OUTPUT) is not None:
                 return self.__result
         return self.__result_success()
      except Exception:
         except_str = traceback.format_exc()
         return self.__result_error(exception=except_str)
     finally:
         locale.setlocale(locale.LC_ALL, current_locale)
Example #57
    def generate_bootstrap_theme(self):
        dir = frappe.utils.get_bench_path() + '/sites/assets/' + get_site_name(
        ) + '/theme/css'
        is_dir_exists(dir)

        from subprocess import Popen, PIPE
        file_name = 'custom_theme.css'
        output_path = join_path(dir, file_name)
        content = self.theme_scss or ''
        content = content.replace('\n', '\\n')
        command = ['node', 'generate_bootstrap_theme.js', output_path, content]
        process = Popen(command,
                        cwd=frappe.get_app_path('frappe', '..'),
                        stdout=PIPE,
                        stderr=PIPE)
        process.communicate()  # wait for the node script to finish before rebuilding
        self.bench_build()
        frappe.msgprint(_('Compiled Successfully'), alert=True)
Example #58
def save_import(text, docid, collection=None):
    '''
    TODO: DOC:
    '''

    directory = collection
    #print directory

    if directory is None:
        dir_path = DATA_DIR
    else:
        #XXX: These "security" measures can surely be fooled
        if (directory.count('../') or directory == '..'):
            raise InvalidDirError(directory)

        dir_path = real_directory(directory)

    # Is the directory a directory and are we allowed to write?
    if not isdir(dir_path):
        raise InvalidDirError(dir_path)
    if not access(dir_path, W_OK):
        raise NoWritePermissionError(dir_path)

    base_path = join_path(dir_path, docid)
    #print base_path
    txt_path = base_path + '.' + TEXT_FILE_SUFFIX
    ann_path = base_path + '.' + JOINED_ANN_FILE_SUFF

    # Before we proceed, verify that we are not overwriting
    for path in (txt_path, ann_path):
        if isfile(path):
            raise FileExistsError(path)

    # Make sure we have a valid POSIX text file, i.e. that the
    # file ends in a newline.
    if text != "" and text[-1] != '\n':
        text = text + '\n'

    lemmatized_text1 = lemmatizer(text)
    real_lemmatized_text = lemmatizer2(text)
    lemmatized_text = list(izip_longest(lemmatized_text1,
                                        real_lemmatized_text))
    conll_text = conll(lemmatized_text)
    standoff_main(conll_text, docid)

    return {'document': docid}
Example #59
def read_manifest(game_path, manifest_name):
    """
    Try reading manifest game data in dictionary form from game_path.
    """
    try:
        game_path = get_absolute_path(game_path)
        game_path = join_path(game_path, manifest_name)
        f = open(unicode(game_path), 'r')
        try:
            data = yaml.load(f)
        finally:
            f.close()
    except IOError as e:
        LOG.error('Failed loading manifest: %s', str(e))
        raise GameError
    else:
        return data
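A hypothetical call, assuming a YAML manifest sits at the game's root:

data = read_manifest('/games/mygame', 'manifest.yaml')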
Example #60
def gather_local_filelisting(directory=PLEXNET_LOCAL, gathered=None):
    """Return a set of all resources inside the given ``directory``."""

    if gathered is None:
        if not isdir(directory):
            return set()
        gathered = set()

    for item in listdir(directory):
        path = join_path(directory, item)
        if isdir(path):
            gathered.add(path + '/')
            gather_local_filelisting(path, gathered)
        else:
            gathered.add(path)

    return gathered