Example #1
def copy_release():

    if path_exists(STATIC_OUTPUT_PATH):
        rmtree(STATIC_OUTPUT_PATH)

    for pattern in RELEASE_PAGE_FILES:
        for f in glob(path_join(STATIC_TEMPLATE_PATH, pattern)):
            srcfile = normpath(f)
            dstfile = normpath(path_join(STATIC_OUTPUT_PATH, relpath(f, STATIC_TEMPLATE_PATH)))
            dst_dir = dirname(dstfile)
            if dst_dir != "" and not path_exists(dst_dir):
                makedirs(dst_dir)
            shutil_copy(srcfile, dstfile)

    for f in RELEASE_FILES:
        srcfile = normpath(f)
        dstfile = normpath(path_join(STATIC_OUTPUT_PATH, f))
        dst_dir = dirname(dstfile)
        if dst_dir != "" and not path_exists(dst_dir):
            makedirs(dst_dir)
        shutil_copy(srcfile, dstfile)

    shutil_copy("benchmark.canvas.js", normpath(path_join(STATIC_OUTPUT_PATH, "benchmark.canvas.js")))

    shutil_copytree(normpath('staticmax'), path_join(STATIC_OUTPUT_PATH, 'staticmax'))

    copy_release_capture(config_name=DEFAULT_CAPTURE_NAME)
Example #2
def fetch_and_archive(service, email, archive_path, mid_list):

    logger.info(
        'fetch_and_archive started. email: %s, archive_path: %s, mid_list: %d message(s)' %
        (email, archive_path, len(mid_list))
    )

    if path_isabs(archive_path):
        output_dir = realpath(archive_path)
    else:
        output_dir = realpath(expanduser(path_join(getcwd(), archive_path)))

    count = 0
    error = 0

    for mid in mid_list:

        file_name = path_join(output_dir, ('%x.gz' % mid))
        message = fetch_mail(service, email, mid)

        if not message:
            error += 1
            continue

        with gzip_open(file_name, 'wb') as f:
            f.write(urlsafe_b64decode(message['raw']))
            logger.debug('Message id %x gzipped to %s.' % (mid, file_name))

        count += 1

    logger.info('fetch_and_archive completed. Total %d item(s) saved. Error %d item(s).' % (count, error))
Example #3
    def sub(self, path):
        """Returns AppDir instance for given subdirectory name."""

        if is_collection(path):
            path = path_join(*path)

        return AppDir(path_join(self.path, path))
Example #4
 def serve_dir(self, path, system_path):
     if path and not path.endswith('/'):
         self.out_buffer += REDIRECT % (get_timestamp(), path + '/')
         self.timeout = 0
     else:
         try:
             items_dir = [item for item in listdir(system_path)
                             if isdir(path_join(system_path, item))]
             items_file = [item for item in listdir(system_path)
                             if isfile(path_join(system_path, item))]
             items_dir.sort()
             items_file.sort()
             if path:
                 items_dir.insert(0, '..')
             markup = [ITEM_DIR % (quote(item), item)
                         for item in items_dir]
             markup.extend([ITEM_FILE % (quote(item), item)
                                 for item in items_file])
             content = DIR_VIEW % ("".join(markup))
         except Exception as msg:
             content = DIR_VIEW % ("""<li style="color:#f30">%s</li>""" % msg)
         self.out_buffer += RESPONSE_OK_CONTENT % (
             get_timestamp(),
             '',
             "text/html",
             len(content),
             content)
         self.timeout = 0
Example #5
    def __init__(self, name='sid', dir=path_join(WORK_DIR, 'sessions'),
            path=None, domain=None, max_age=None):

        self._name = name
        now = datetime.utcnow()

        # blank cookie
        self._cookie = SimpleCookie()

        if 'HTTP_COOKIE' in environ:
            # cookie already exists, see what's in it
            self._cookie.load(environ['HTTP_COOKIE'])

        try:
            # what's our session ID?
            self.sid = self._cookie[name].value
        except KeyError:
            # there isn't any, make a new session ID
            remote = environ.get('REMOTE_ADDR')
            self.sid = sha224('%s-%s' % (remote, now)).hexdigest()

        self._cookie.clear()
        self._cookie[name] = self.sid

        # set/reset path
        if path:
            self._cookie[name]['path'] = path
        else:
            self._cookie[name]['path'] = ''

        # set/reset domain
        if domain:
            self._cookie[name]['domain'] = domain
        else:
            self._cookie[name]['domain'] = ''

        # set/reset expiration date
        if max_age:
            if isinstance(max_age, int):
                max_age = timedelta(seconds=max_age)
            expires = now + max_age
            self._cookie[name]['expires'] = expires.strftime('%a, %d %b %Y %H:%M:%S')
        else:
            self._cookie[name]['expires'] = ''

        # to protect against cookie-stealing JS, make our cookie
        # available only to the browser, and not to any scripts
        try:
            # This will not work for Python 2.5 and older
            self._cookie[name]['httponly'] = True
        except CookieError:
            pass

        # if the sessions dir doesn't exist, create it
        if not exists(dir):
            mkdir(dir)
        # persist the session data
        self._shelf_file = path_join(dir, self.sid)
        # -1 signifies the highest available protocol version
        self._shelf = shelve_open(self._shelf_file, protocol=-1, writeback=True)
Example #6
def __setup_db():

    homedir = os_path.expanduser('~')
    PYDATASET_HOME = path_join(homedir, '.pydataset/')

    if not os_path.exists(PYDATASET_HOME):
        # create $HOME/.pydataset/
        os_mkdir(PYDATASET_HOME)
        print('initiated datasets repo at: {}'.format(PYDATASET_HOME))

        # copy the resources.tar.gz from the module files.

        # # There should be a better way ? read from a URL ?
        import pydataset
        filename = path_join(pydataset.__path__[0], 'resources.tar.gz')
        tar = tarfile.open(filename, mode='r|gz')

        # # reading 'resources.tar.gz' from a URL
        # try:
        #     from urllib.request import urlopen # py3
        # except ImportError:
        #     from urllib import urlopen # py2
        # import tarfile
        #
        # targz_url = 'https://example.com/resources.tar.gz'
        # httpstrem = urlopen(targz_url)
        # tar = tarfile.open(fileobj=httpstrem, mode="r|gz")

        # extract 'resources.tar.gz' into PYDATASET_HOME
        # print('extracting resources.tar.gz ... from {}'.format(targz_url))
        tar.extractall(path=PYDATASET_HOME)
        # print('done.')
        tar.close()
Example #7
    def uniqueFilename(self, name,
    count=None, count_format="%d", save=True):
        # Test with no count suffix
        name = basename(name)
        if not name:
            raise ValueError("Empty filename")
        if count is None and not self._exists(name):
            if save:
                self.files.add(name)
            return path_join(self.directory, name)

        # Create filename pattern: "archive.tar.gz" => "archive-%04u.tar.gz"
        name_pattern = name.split(".", 1)
        if count is None:
            count = 2
        count_format = "-" + count_format
        if 1 < len(name_pattern):
            name_pattern = name_pattern[0] + count_format + '.' + name_pattern[1]
        else:
            name_pattern = name_pattern[0] + count_format

        # Try names and increment count at each step
        while True:
            name = name_pattern % count
            if not self._exists(name):
                if save:
                    self.files.add(name)
                return path_join(self.directory, name)
            count += 1
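
The subtle part above is the pattern-building step: splitting on the first dot keeps compound extensions such as ".tar.gz" intact. A minimal standalone sketch of just that step (unique_name_pattern is a hypothetical helper, not part of the class above):

from os.path import basename

def unique_name_pattern(name, count_format="%d"):
    # Mirrors the logic above: split on the first dot so multi-part
    # extensions like ".tar.gz" stay together after the count suffix.
    name = basename(name)
    parts = name.split(".", 1)
    count_format = "-" + count_format
    if len(parts) > 1:
        return parts[0] + count_format + "." + parts[1]
    return parts[0] + count_format

print(unique_name_pattern("archive.tar.gz"))          # archive-%d.tar.gz
print(unique_name_pattern("archive.tar.gz", "%04d"))  # archive-%04d.tar.gz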
Example #8
def main(args):
    if not args:
        return

    key = "" #enter API key
    auth = b64encode(bytes("api:" + key, "ascii")).decode("ascii")
    headers = {"Authorization" : " ".join(("Basic", auth))}
    connection = https("api.tinypng.com")
    for picture_file in gen_file_list(args):
        print(" ".join((">>>Shrink pic:", picture_file)))

        result_dir = path_join(dirname(picture_file), "tiny_png_optimized")
        if not isdir(result_dir):
            mkdir(result_dir)
        output = path_join(result_dir, basename(picture_file))

        connection.request("POST", "https://api.tinypng.com/shrink", open(picture_file, "rb").read(), headers)
        response = connection.getresponse()
        if response.status == 201:
            # Compression was successful, retrieve output from Location header.
            response.read()
            connection.request("GET", response.getheader("Location"))
            result = connection.getresponse()
            open(output, "wb").write(result.read())
            print(" ".join(("Successfully shrunk. Result pic:", output)))
        else:
            # Something went wrong! You can parse the JSON body for details.
            print(" ".join(("Failed to compress:", picture_file, "Status:", str(response.status))))
            print(" ".join(("Reason:", response.reason)))
            response.read()
Example #9
 def run(self):
     with open(path_join(dirname(__file__), 'faulthandler.pth'), 'w') as fh:
         with open(path_join(dirname(__file__), 'faulthandler.embed')) as sh:
             fh.write(
                 'import os, sys;'
                 'exec(%r)' % sh.read().replace('    ', ' ')
             )
Example #10
def install(install_asset_info, install_path):
    old_install_files = listdir(install_path)
    mapping = {}

    for asset_info in install_asset_info:
        if not asset_info.install:
            continue
        try:
            file_hash = get_file_hash(asset_info.build_path)
            logical_path = asset_info.logical_path
            physical_path = '%s_%s.%s' % (splitext(basename(logical_path))[0],
                                          file_hash,
                                          asset_info.build_path.split('.', 1)[1])

            copy_file(asset_info.build_path, path_join(install_path, physical_path))
            mapping[logical_path] = physical_path

            try:
                old_install_files.remove(physical_path)
            except ValueError:
                pass

        except (IOError, TypeError):
            error('could not install %s' % asset_info.path)

    for path in old_install_files:
        asset_install_path = path_join(install_path, path)
        print('Removing old install file ' + asset_install_path)
        remove_file(asset_install_path)

    return mapping
Example #11
def copy(root, dest_root, path):
	full_dest = path_join(dest_root, path)
	# exist_ok avoids an OSError when the destination directory already exists
	makedirs(dirname(full_dest), exist_ok=True)

	with open(path_join(root, path), 'r') as src:
		prepared_code_lines = prepare(src.readlines())
	with open(full_dest, 'w') as dst:
		dst.write(''.join(prepared_code_lines))
Example #12
def override_installer(platforms, installer, spec):
    from glob import glob
    from yaml import load as yaml_load
    from os.path import basename, isfile
    from copy import deepcopy

    default_yaml = path_join(spec, '_default.yaml')
    if isfile(default_yaml):
        yaml = yaml_load(open(default_yaml).read())
        default = yaml.get('default', {})
        # map() is lazy on Python 3, so use an explicit loop for the side effects
        for platform in platforms:
            if platform in yaml:
                default.update(yaml[platform])
    else:
        default = {}

    installer['_default'].update(default)

    for filename in glob(path_join(spec, '[a-z0-9A-Z]*.yaml')):
        name = basename(filename)[0:-len('.yaml')]
        yaml = yaml_load(open(filename).read())
        result = deepcopy(installer['_default'])
        result.update(yaml.get('default', {}))
        for platform in platforms:
            if platform in yaml:
                result.update(yaml[platform])

        if name in installer:
            installer[name].update(result)
        else:
            installer[name] = result

    return installer
Example #13
def install(options):
    platforms = get_platform()
    installer = dict(_default={})

    # read data from default installer
    spec = path_join(options['base'], 'installers', 'default')
    override_installer(platforms, installer, spec)

    # update with specified installer
    spec = path_join(options['base'], 'installers', options['installer'])
    override_installer(platforms, installer, spec)

    for name, spec in installer.items():

        if name.startswith('_'):
            # skip internal node
            continue

        try:
            # force install via command line option
            if 'force' in options:
                spec['force'] = 1
            do_install(name, spec)
        except CampError as e:
            error('[%s]: %s' % (name, e))
Example #14
    def get_zope_config():
        '''Try and figure out where the groupserver config is, using Zope

:returns: The location of the config file from Zope.
:rtype: str
:raises gs.config.errors.ConfigFileError: The configuration file could not
    be read.

The location of the configuration file should be either
``$ZOPE_HOME/etc/gsconfig.ini`` or ``$INSTANCE_HOME/etc/gsconfig.ini``, with
the latter preferred for backwards compatibility, but the former being more
typical. This normally equates to ``etc/gsconfig.ini`` within what the
installation documentation refers to as *the GroupServer directory*.
'''
        cfg = getConfiguration()
        # The old location. May go AWOL.
        iConfigPath = path_join(cfg.instancehome, 'etc/gsconfig.ini')
        # The better location.
        zConfigPath = path_join(cfg.zopehome, 'etc/gsconfig.ini')

        if ((not isfile(iConfigPath)) and (not isfile(zConfigPath))):
            m = 'Could not read the configuration, as neither "{0}" nor '\
                '"{1}" exist.'
            msg = m.format(iConfigPath, zConfigPath)
            raise ConfigFileError(msg)

        retval = iConfigPath if isfile(iConfigPath) else zConfigPath
        return retval
Example #15
 def make_counts(self, preprocessor, short_id, column_names, type_n, type_v):
     #count_vector_titles = CountVectorizer(
         #read_column(train_filename, column_name),
         #max_features=200)
     file_id = self._check_type_n(type_n)
     valid_file_id = self._check_type_n(type_v)
     name = "%s_%s_%s_%s"
     for column_name in column_names:
         vocabulary_path = path_join(self.cache_dir, name % (column_name, type_n, short_id, "vocabulary"))
         stop_words_path = path_join(self.cache_dir, name % (column_name, type_n, short_id, "stop_words"))
         valid_path = path_join(self.cache_dir, name % (column_name, type_v, short_id, "matrix"))
         cur_preprocessor = clone(preprocessor)
         print("Working on %s" % column_name)
         if isfile(vocabulary_path) and isfile(stop_words_path):
             print("vocabulary exists")
             vocabulary = joblib.load(vocabulary_path)
             stop_words = joblib.load(stop_words_path)
             cur_preprocessor.set_params(vocabulary=vocabulary)
             cur_preprocessor.set_params(stop_words=stop_words)
         else:
             print("Fitting train")
             cur_preprocessor.set_params(input=self.read_column(file_id, column_name))
             titles = cur_preprocessor.fit_transform(self.read_column(file_id, column_name))
             joblib.dump(cur_preprocessor.vocabulary_, vocabulary_path)
             joblib.dump(cur_preprocessor.stop_words_, stop_words_path)
             print(joblib.dump(titles, path_join(self.cache_dir, name % (column_name, type_n, short_id, "matrix"))))
         if not isfile(valid_path):
             print("Fitting valid")
             titles_valid = cur_preprocessor.transform(
                 self.read_column(valid_file_id, column_name))
             print(joblib.dump(titles_valid, valid_path))
Example #16
 def copy_assets(self):
   source = pkg_resources.resource_filename(__name__, path_join("data", "static"))
   try:
     shutil.copytree(source, path_join(self.path, "static"))
     print("Assets copied")
   except OSError as e:
     print('Static folder was not copied because of an error:', str(e))
Example #17
def loadModel(user_id, op_iter):
    user_dir = path_join(TEMP_PATH, str(user_id))
    curr_fn = "%s_%04d" % (DATASET_NAME, op_iter)
    next_fn = "%s_%04d" % (DATASET_NAME, op_iter + 1)
    config_path = path_join(user_dir, curr_fn + ".conf")
    model_out_path = path_join(user_dir, next_fn + ".model")
    dump_path = path_join(user_dir, next_fn + ".dump")

    sys.stderr.write("\n".join([config_path, model_out_path, dump_path]) + "\n")

    # evaluation
    proc = subprocess.Popen(
        [XGBOOST_PATH, config_path, "task=eval", "model_in=" + model_out_path],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )

    evals = parseEval(proc.stderr.read())
    sys.stderr.write("eval log: " + str(evals) + "\n")
    test_error = evals["test-error"]
    train_error = evals["train-error"]

    features, feature_map = feature_utils.loadFeatureTable(FEATTABLE_PATH, FEATMAP_PATH)
    json_obj = dump2json(dump_path, feature_map)
    json_obj["test_error"] = test_error
    json_obj["train_error"] = train_error
    if op_iter == 0:
        json_obj["features"] = features

    return json_obj
Example #18
def _get_current_format(datadir):
    if not exists(path_join(datadir, '.version')):
        # Format v1 didn't have a .version file.
        return 1

    with open(path_join(datadir, '.version')) as version_file:
        return int(version_file.read())
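
The inverse operation is symmetric; a hypothetical sketch of a companion writer (_set_current_format is an assumed name, not taken from the source):

from os.path import join as path_join

def _set_current_format(datadir, version):
    # Stamp the data directory with its format version; the reader above
    # treats a missing .version file as format v1.
    with open(path_join(datadir, '.version'), 'w') as version_file:
        version_file.write(str(version))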
Example #19
 def _pkg_prepare_install(self):
     build_lib = self.get_finalized_command('build_binary').build_lib
     # two-staged copy in case of built_files
     for filedef in self.built_files:
         src = src_orig = self.pkg_params[filedef['src']]
         src = path_basename(src_orig)
         assert src == src_orig, "built_files contains dirs"
         src = path_join(build_lib, src)
         dst = self.pkg_params[filedef['dst']]
         dst = path_join(build_lib, path_basename(dst))
         if src != dst:
             self._pkg_prepare_file(src, dst)
         self.pkg_params[filedef['src']] = dst
     icmd = self.distribution.get_command_obj('install', create=False)
     for filedef in (self.data_files + self.built_files):
         src = self.pkg_params[filedef['src']]
         dst = self.pkg_params[filedef['dst']]
         no_glob = all(c not in path_basename(src) for c in '?*')
         if dst.startswith(sys_prefix):
             dst = path_join(icmd.install_base, dst[len(sys_prefix)+1:])
         self.distribution.data_files.append((
             path_dirname(dst), [
                 path_join(
                     path_dirname(src),
                     path_basename(dst)
                 ),
             ]
         ) if no_glob else (
             dst,
             glob(src)
         ))
         if DEBUG:
             print(DBGPFX + "\tinstall data_files: %s"
                   % self.distribution.data_files)
Example #20
	def __str__(self):
		filename = self._p_conn.filename
		base = path_join(abspath(dirname(filename)),
			'%s.anonymous' % filename)

		uuid = self.uuid
		return path_join(base, uuid[:2], uuid[2:4], uuid[4:])
Example #21
    def post(self, request, pk):
        repo = get_object_or_404(models.Repository, pk=pk)

        choices = [(d[0][2:], d[0])
                   for d in repo.vcs_repo.head.base_tree.walk()]

        form = self.form_class(request.POST, choices=choices)
        if form.is_valid():
            for tree in form.cleaned_data['trees']:
                for app in repo.vcs_repo.head.get_file(tree).trees:
                    abs_path = path_join(tree, app.name)
                    makefile = path_join(abs_path, 'Makefile')
                    try:
                        app_name, blacklist, whitelist = models.Application.get_name_and_lists_from_makefile(repo, makefile)
                    except models.Application.DoesNotExist:
                        continue
                    except AssertionError:
                        continue
                    appobj = models.Application(name=app_name, path=abs_path)
                    appobj.save()
                    app_tree, created = models.ApplicationTree.objects.get_or_create(
                        tree_name=tree, repo=repo, application=appobj)
                    for board in models.Board.objects.all():
                        if board.riot_name in blacklist and board not in appobj.blacklisted_boards.all():
                            appobj.blacklisted_boards.add(board)
                        if board.riot_name in whitelist and board not in appobj.whitelisted_boards.all():
                            appobj.whitelisted_boards.add(board)
            return HttpResponseRedirect(reverse_lazy('repository-list'))
        return render(request, self.template_name, {'form': form})
Example #22
def check_py_tool(env_name, tool_name, env, options, default_arg=None, required=False):
    if required:
        _warning = warning
        _error = error
    else:
        _warning = info
        _error = info

    info("Searching for tool: %s" % tool_name)

    tools = [
        tool_name,
        path_join(env['PYTOOLS_ROOT'], tool_name),
        path_join(env['PYTOOLS_ROOT'], tool_name + '.py'),
        path_join('tools', tool_name),
        path_join('tools', tool_name + '.py')
    ]

    for tool in tools:
        info("Calling tool: %s" % tool)

        args = [tool]
        if default_arg:
            args.append(default_arg)
        try:
            result = exec_command(args)
        except CalledProcessError:
            _warning("Failed to run tool as: %s" % args)
        else:
            break
    else:
        _error("Failed to find tool: %s" % tool_name)
        return None

    return tool
Example #23
    def run(self):
        """Run extension builder."""
        if "%x" % sys.maxsize != '7fffffffffffffff':
            raise DistutilsPlatformError("%s require 64-bit operating system" %
                                         SETUP_METADATA["packages"])

        if "z" not in self.libraries:
            zcmd = ['bash', '-c', 'cd ' + ZLIBDIR + ' && ( test Makefile -nt'
                    ' configure || bash ./configure --static ) && make -f '
                    'Makefile.pic PIC']
            spawn(cmd=zcmd, dry_run=self.dry_run)
            self.extensions[0].extra_objects.extend(
                path_join("third-party", "zlib", bn + ".lo") for bn in [
                    "adler32", "compress", "crc32", "deflate", "gzclose",
                    "gzlib", "gzread", "gzwrite", "infback", "inffast",
                    "inflate", "inftrees", "trees", "uncompr", "zutil"])
        if "bz2" not in self.libraries:
            bz2cmd = ['bash', '-c', 'cd ' + BZIP2DIR + ' && make -f '
                      'Makefile-libbz2_so all']
            spawn(cmd=bz2cmd, dry_run=self.dry_run)
            self.extensions[0].extra_objects.extend(
                path_join("third-party", "bzip2", bn + ".o") for bn in [
                    "blocksort", "huffman", "crctable", "randtable",
                    "compress", "decompress", "bzlib"])
        _build_ext.run(self)
Example #24
def generate_image_path(content, extname):
    hexdigest = hashlib.md5(content).hexdigest()
    parent_path = path_join("user",
                            "images",
                            hexdigest[:2],
                            hexdigest[2:4])
    return "%s.%s" % (path_join(parent_path, hexdigest), extname)
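
The two-level fan-out keeps any single directory small. A short usage sketch of the function above (assuming Python 3, where hashlib.md5 requires bytes):

# md5(b"hello") is 5d41402abc4b2a76b9719d911017c592, so this prints
# user/images/5d/41/5d41402abc4b2a76b9719d911017c592.png (POSIX separators)
print(generate_image_path(b"hello", "png"))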
Example #25
def assemble_directory(input_dir, output_dir, scripts_dir, config):
    paired_reads = []
    for i in open(input_dir):
        paired_reads.append(i.split())

    num_pairs = len(paired_reads)
    num_digits = int(ceil(log10(num_pairs)))  # Used to zero-pad small numbers

    for index, files in enumerate(paired_reads):
        prefix = str(index).zfill(num_digits)
        identifier = '%s_%s' % (prefix, basename(files[0]).split('_R1')[0])
        script = path_join(scripts_dir, str(index)+'.sh')
        sub_output_dir = path_join(output_dir, identifier)
        build_assembly_commands(config, script, files, sub_output_dir)

    launch_script = path_join(scripts_dir, 'launch.sh')
    if 'SLOT_LIMIT' in config:
        slot_limit = int(config['SLOT_LIMIT'])
    else:
        slot_limit = None

    generate_launch_script(launch_script, len(paired_reads), scripts_dir,
                           int(config['NUM_THREADS']),
                           int(config['MEMORY_GB']),
                           output_dir,
                           slot_limit)
Example #26
    def gen_training_data(self,
                          pdbbind_dir,
                          pdbbind_versions=(2016,),
                          home_dir=None,
                          use_proteins=True):
        if home_dir is None:
            home_dir = path_join(dirname(__file__), 'PLECscore')
        filename = path_join(home_dir, 'plecscore_descs_p%i_l%i.csv.gz' %
                             (self.depth_protein, self.depth_ligand))

        # The CSV will contain unfolded FP
        self.descriptor_generator.func.keywords['size'] = MAX_HASH_VALUE
        self.descriptor_generator.shape = MAX_HASH_VALUE

        super(PLECscore, self)._gen_pdbbind_desc(
            pdbbind_dir=pdbbind_dir,
            pdbbind_versions=pdbbind_versions,
            desc_path=filename,
            include_general_set=True,
            use_proteins=use_proteins,
        )

        # reset to the original size
        self.descriptor_generator.func.keywords['size'] = self.size
        self.descriptor_generator.shape = self.size
Example #27
    def gen_json(self, home_dir=None, pdbbind_version=2016):
        if not home_dir:
            home_dir = path_join(dirname(__file__), 'PLECscore')

        if isinstance(self.model, SGDRegressor):
            attributes = ['coef_', 'intercept_', 't_']
        elif isinstance(self.model, MLPRegressor):
            attributes = ['loss_', 'coefs_', 'intercepts_', 'n_iter_',
                          'n_layers_', 'n_outputs_', 'out_activation_']

        out = {}
        for attr_name in attributes:
            attr = getattr(self.model, attr_name)
            # convert numpy arrays to list for json
            if isinstance(attr, np.ndarray):
                attr = attr.tolist()
            elif (isinstance(attr, (list, tuple)) and
                  isinstance(attr[0], np.ndarray)):
                attr = [x.tolist() for x in attr]
            out[attr_name] = attr

        json_path = path_join(home_dir, 'plecscore_%s_p%i_l%i_s%i_pdbbind%i.json' %
                              (self.version, self.depth_protein,
                               self.depth_ligand, self.size, pdbbind_version))

        with open(json_path, 'w') as json_f:
            json.dump(out, json_f, indent=2)
        return json_path
Example #28
def locateProgram(program, use_none=False, raise_error=False):
    if isabs(program):
        # Absolute path: nothing to do
        return program
    if dirname(program):
        # ./test => $PWD/./test
        # ../python => $PWD/../python
        program = path_join(getcwd(), program)
        program = normpath(program)
        return program
    if use_none:
        default = None
    else:
        default = program
    paths = getenv('PATH')
    if not paths:
        if raise_error:
            raise ValueError("Unable to get PATH environment variable")
        return default
    for path in paths.split(pathsep):
        filename = path_join(path, program)
        if access(filename, X_OK):
            return filename
    if raise_error:
        raise ValueError("Unable to locate program %r in PATH" % program)
    return default
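
A brief usage sketch (assuming locateProgram from the example above is in scope; the paths shown are illustrative and depend on the local filesystem and PATH):

locateProgram("/usr/bin/env")    # absolute path: returned unchanged
locateProgram("./run.sh")        # has a dirname: resolved against getcwd() and normalized
locateProgram("no-such-tool", use_none=True)  # not on PATH: returns None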
Example #29
def loadConfigurableDb():
    '''
    Equivalent to GaudiKernel.ConfigurableDb.loadConfigurableDb(), but does a
    deep search and executes the '*.confdb' files instead of importing them.
    '''
    log = GaudiKernel.ConfigurableDb.log
    from os.path import join as path_join
    # look for the confdb files in all the reasonable places
    #  - CMake builds
    confDbFiles = []
    for path in sys.path:
        confDbFiles += [f for f in glob(path_join(path, '*', '*.confdb'))
                        if os.path.isfile(f)]
    #  - used projects and local merged file
    pathlist = os.getenv("LD_LIBRARY_PATH", "").split(os.pathsep)
    for path in filter(os.path.isdir, pathlist):
        confDbFiles += [f for f in [path_join(path, f) for f in os.listdir(path)
                                    if f.endswith('.confdb')]]
    #  - load the confdb files
    for confDb in confDbFiles:
        log.debug( "\t-loading [%s]..." % confDb )
        try:
            cfgDb._loadModule( confDb )
        except Exception as err:
            # It may happen that the file is found but not completely
            # written, usually during parallel builds, but we do not care.
            log.warning( "Could not load file [%s] !", confDb )
            log.warning( "Reason: %s", err )
Example #30
def find_non_ascii(path, env):
    non_ascii_count = 0
    for root, dirs, files in os.walk(path):
        for dir in dirs:
            non_ascii_count += find_non_ascii(path_join(root, dir), env)

        for file in [f for f in files if f.endswith('.js')]:
            filepath = path_join(root, file)
            info('Checking: %s' % filepath)

            data = open(filepath, 'rb')
            line = 0
            for l in data:
                line += 1
                char = 0
                try:
                    for s in l.decode('utf-8'):
                        char += 1
                        try:
                            s.encode('ascii')
                        except UnicodeEncodeError:
                            warning('%s: Non ASCII character at line:%s char:%s' % (filepath, line, char))
                            non_ascii_count += 1
                except UnicodeDecodeError as e:
                    warning('%s: Non ASCII character at line:%s char:%s' % (filepath, line, char))
                    non_ascii_count += 1
            data.close()

    return non_ascii_count
Example #31
def check_payload(data_dir, payloads_file, external_endpoint, proto_endpoint,
                  response, flags, taint):
    config_reader = ConfigParser(interpolation=None)
    with open(path_join(data_dir, payloads_file),
              encoding='utf-8') as payload_file:
        config_reader.read_file(payload_file)

    for section in config_reader.sections():
        if section == flags.section:
            expected_value = config_reader[section]["value"].replace(
                '[EXTERNAL_ENDPOINT]', external_endpoint)
            expected_value = expected_value.replace("[PROTO_ENDPOINT]",
                                                    proto_endpoint)
            expected_value = expected_value.replace("__XSS__", taint)
            tag_names = config_reader[section]["tag"].split(",")
            attribute = config_reader[section]["attribute"]
            case_sensitive = config_reader[section].getboolean(
                "case_sensitive")
            match_type = config_reader[section].get("match_type", "exact")

            attribute_constraint = {
                attribute: True
            } if attribute not in ["full_string", "string"] else {}

            for tag in response.soup.find_all(tag_names,
                                              attrs=attribute_constraint):
                non_exec_parent = find_non_exec_parent(tag)

                if non_exec_parent and not (tag.name == "frame"
                                            and non_exec_parent == "frameset"):
                    continue

                if attribute == "string" and tag.string:
                    if case_sensitive:
                        if expected_value in tag.string:
                            return True
                    else:
                        if expected_value.lower() in tag.string.lower():
                            return True
                elif attribute == "full_string" and tag.string:
                    if case_sensitive:
                        if match_type == "exact" and expected_value == tag.string.strip():
                            return True
                        if match_type == "starts_with" and tag.string.strip().startswith(expected_value):
                            return True
                    else:
                        if match_type == "exact" and expected_value.lower() == tag.string.strip().lower():
                            return True
                        if match_type == "starts_with" and \
                                tag.string.strip().lower().startswith(expected_value.lower()):
                            return True
                else:
                    # Found attribute specified in .ini file in attributes of the HTML tag
                    if attribute in tag.attrs:
                        if case_sensitive:
                            if match_type == "exact" and tag[attribute] == expected_value:
                                return True
                            if match_type == "starts_with" and tag[attribute].startswith(expected_value):
                                return True
                        else:
                            if match_type == "exact" and tag[attribute].lower() == expected_value.lower():
                                return True
                            if match_type == "starts_with" and \
                                    tag[attribute].lower().startswith(expected_value.lower()):
                                return True
            break

    return False
Example #32
from argparse import ArgumentParser, FileType
from os.path import abspath, dirname, isfile, join as path_join
from shutil import rmtree
from struct import calcsize, pack, unpack
from subprocess import Popen
from sys import stderr, stdin, stdout
from tempfile import mkdtemp
from platform import system
from os import devnull
import numpy as np

### Constants
IS_WINDOWS = system() == 'Windows'
BH_TSNE_BIN_PATH = path_join(dirname(__file__), 'windows', 'bh_tsne.exe') if IS_WINDOWS else path_join(dirname(__file__), 'bh_tsne')
assert isfile(BH_TSNE_BIN_PATH), ('Unable to find the bh_tsne binary in the '
    'same directory as this script, have you forgotten to compile it?: {}'
    ).format(BH_TSNE_BIN_PATH)
# Default hyper-parameter values from van der Maaten (2014)
# https://lvdmaaten.github.io/publications/papers/JMLR_2014.pdf (Experimental Setup, page 13)
DEFAULT_NO_DIMS = 2
INITIAL_DIMENSIONS = 50
DEFAULT_PERPLEXITY = 50
DEFAULT_THETA = 0.5
EMPTY_SEED = -1

###

def _argparse():
    argparse = ArgumentParser('bh_tsne Python wrapper')
Example #33
def bh_tsne(samples, no_dims=DEFAULT_NO_DIMS, initial_dims=INITIAL_DIMENSIONS, perplexity=DEFAULT_PERPLEXITY,
            theta=DEFAULT_THETA, randseed=EMPTY_SEED, verbose=False):

    samples -= np.mean(samples, axis=0)
    cov_x = np.dot(np.transpose(samples), samples)
    [eig_val, eig_vec] = np.linalg.eig(cov_x)

    # sorting the eigen-values in the descending order
    eig_vec = eig_vec[:, eig_val.argsort()[::-1]]

    if initial_dims > len(eig_vec):
        initial_dims = len(eig_vec)

    # truncating the eigen-vectors matrix to keep the most important vectors
    eig_vec = eig_vec[:, :initial_dims]
    samples = np.dot(samples, eig_vec)

    # Assume that the dimensionality of the first sample is representative for
    #   the whole batch
    sample_dim = len(samples[0])
    sample_count = len(samples)

    # bh_tsne works with fixed input and output paths, give it a temporary
    #   directory to work in so we don't clutter the filesystem
    with TmpDir() as tmp_dir_path:
        # Note: The binary format used by bh_tsne is roughly the same as for
        #   vanilla tsne
        with open(path_join(tmp_dir_path, 'data.dat'), 'wb') as data_file:
            # Write the bh_tsne header
            data_file.write(pack('iiddi', sample_count, sample_dim, theta, perplexity, no_dims))
            # Then write the data
            for sample in samples:
                data_file.write(pack('{}d'.format(len(sample)), *sample))
            # Write random seed if specified
            if randseed != EMPTY_SEED:
                data_file.write(pack('i', randseed))

        # Call bh_tsne and let it do its thing
        with open(devnull, 'w') as dev_null:
            bh_tsne_p = Popen((abspath(BH_TSNE_BIN_PATH), ), cwd=tmp_dir_path,
                    # bh_tsne is very noisy on stdout, tell it to use stderr
                    #   if it is to print any output
                    stdout=stderr if verbose else dev_null)
            bh_tsne_p.wait()
            assert not bh_tsne_p.returncode, ('ERROR: Call to bh_tsne exited '
                    'with a non-zero return code exit status, please ' +
                    ('enable verbose mode and ' if not verbose else '') +
                    'refer to the bh_tsne output for further details')

        # Read and pass on the results
        with open(path_join(tmp_dir_path, 'result.dat'), 'rb') as output_file:
            # The first two integers are just the number of samples and the
            #   dimensionality
            result_samples, result_dims = _read_unpack('ii', output_file)
            # Collect the results, but they may be out of order
            results = [_read_unpack('{}d'.format(result_dims), output_file)
                for _ in range(result_samples)]
            # Now collect the landmark data so that we can return the data in
            #   the order it arrived
            results = [(_read_unpack('i', output_file), e) for e in results]
            # Put the results in order and yield it
            results.sort()
            for _, result in results:
                yield result
Example #34
def globs(pathnames, dirpath='.'):
    import glob
    files = []
    for pathname in pathnames:
        files.extend(glob.glob(path_join(dirpath, pathname)))
    return files
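
A quick usage sketch (patterns and directory are illustrative):

# Collect all .py and .rst files directly under src/; each returned path
# is prefixed with the dirpath, e.g. 'src/setup.py'.
files = globs(['*.py', '*.rst'], dirpath='src')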
Example #35
def main(raw_args):
    import argparse

    from collections import OrderedDict
    from textwrap import dedent

    target_indiv_values = ['armeabi-v7a', 'arm64-v8a', 'x86', 'x86_64']
    target_values = target_indiv_values + ['all']

    actions = OrderedDict()
    actions['configure'] = configure
    actions['make'] = make
    actions['clean'] = clean

    parser = argparse.ArgumentParser(
        description='Builds the Mono runtime for Android',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=dedent('''\
            environment variables:
                ANDROID_SDK_ROOT: Overrides default value for --android-sdk
                ANDROID_NDK_ROOT: Overrides default value for --android-ndk
                MONO_SOURCE_ROOT: Overrides default value for --mono-sources
                ANDROID_HOME: Same as ANDROID_SDK_ROOT
            ''')
        )

    def custom_bool(val):
        if isinstance(val, bool):
            return val
        if val.lower() in ('yes', 'true', 't', 'y', '1'):
            return True
        elif val.lower() in ('no', 'false', 'f', 'n', '0'):
            return False
        else:
            raise argparse.ArgumentTypeError('Boolean value expected.')

    home = os.getenv('HOME', '')
    android_sdk_default = os.getenv('ANDROID_HOME', os.getenv('ANDROID_SDK_ROOT', path_join(home, 'Android/Sdk')))
    android_ndk_default = os.getenv('ANDROID_NDK_ROOT', path_join(android_sdk_default, 'ndk-bundle'))
    mono_sources_default = os.getenv('MONO_SOURCE_ROOT', '')

    default_help = dedent('default: %(default)s')

    parser.add_argument('action', choices=['configure', 'make', 'clean'])
    parser.add_argument('--target', choices=target_values, action='append', required=True)
    parser.add_argument('--configuration', choices=['release', 'debug'], default='release', help=default_help)
    parser.add_argument('--toolchains-prefix', default=path_join(home, 'android-toolchains'), help=default_help)
    parser.add_argument('--android-sdk', default=android_sdk_default, help=default_help)
    parser.add_argument('--android-ndk', default=android_ndk_default, help=default_help)
    parser.add_argument('--android-api-version', default='18', help=default_help)
    parser.add_argument('--android-cmake-version', default='autodetect', help=default_help)
    parser.add_argument('--enable-cxx', action='store_true', default=False, help=default_help)
    parser.add_argument('--verbose-make', action='store_true', default=False, help=default_help)
    parser.add_argument('--strip-libs', type=custom_bool, default=True, help=default_help)
    parser.add_argument('--with-monodroid', type=custom_bool, default=True, help=default_help)
    parser.add_argument('--configure-dir', default=path_join(home, 'mono-configs'), help=default_help)
    parser.add_argument('--install-dir', default=path_join(home, 'mono-installs'), help=default_help)

    if mono_sources_default:
        parser.add_argument('--mono-sources', default=mono_sources_default, help=default_help)
    else:
        parser.add_argument('--mono-sources', required=True)

    args = parser.parse_args(raw_args)

    action = args.action
    targets = args.target

    set_arguments(args)

    if not isdir(MONO_SOURCE_ROOT):
        print('Mono sources directory not found: ' + MONO_SOURCE_ROOT)
        exit(1)

    android_targets = []

    if 'all' in targets:
        android_targets = target_indiv_values[:]
    else:
        for target in targets:
            if target not in android_targets:
                android_targets += [target]

    action_fn = actions[action]

    try:
        for target in android_targets:
            action_fn('android', target)
    except MonoBuildError as e:
        exit(e.message)
Example #36
def setup_android_target_template(env, target):
    extra_target_envs = {
        'armeabi-v7a': {
            'android-armeabi-v7a_CFLAGS': ['-D__POSIX_VISIBLE=201002', '-DSK_RELEASE', '-DNDEBUG', '-UDEBUG', '-fpic', '-march=armv7-a', '-mtune=cortex-a8', '-mfpu=vfp', '-mfloat-abi=softfp'],
            'android-armeabi-v7a_CXXFLAGS': ['-D__POSIX_VISIBLE=201002', '-DSK_RELEASE', '-DNDEBUG', '-UDEBUG', '-fpic', '-march=armv7-a', '-mtune=cortex-a8', '-mfpu=vfp', '-mfloat-abi=softfp'],
            'android-armeabi-v7a_LDFLAGS': ['-Wl,--fix-cortex-a8']
        },
        'arm64-v8a': {
            'android-arm64-v8a_CFLAGS': ['-D__POSIX_VISIBLE=201002', '-DSK_RELEASE', '-DNDEBUG', '-UDEBUG', '-fpic', '-DL_cuserid=9', '-DANDROID64'],
            'android-arm64-v8a_CXXFLAGS': ['-D__POSIX_VISIBLE=201002', '-DSK_RELEASE', '-DNDEBUG', '-UDEBUG', '-fpic', '-DL_cuserid=9', '-DANDROID64']
        },
        'x86': {},
        'x86_64': {
            'android-x86_64_CFLAGS': ['-DL_cuserid=9'],
            'android-x86_64_CXXFLAGS': ['-DL_cuserid=9']
        }
    }

    env.update(extra_target_envs[target])

    android_new_ndk = True

    with open(path_join(ANDROID_NDK_ROOT, 'source.properties')) as file:
        for line in file:
            line = line.strip()
            if line.startswith('Pkg.Revision ') or line.startswith('Pkg.Revision='):
                pkg_revision = line.split('=')[1].strip()
                major = int(pkg_revision.split('.')[0])
                android_new_ndk = major >= 18
                break

    arch = AndroidTargetInfo.archs[target]
    abi_name = AndroidTargetInfo.abi_names[target]
    host_triple = AndroidTargetInfo.host_triples[target]
    api = env['ANDROID_API_VERSION']

    toolchain_path = path_join(ANDROID_TOOLCHAINS_PREFIX, TOOLCHAIN_NAME_FMT % (target, api))

    tools_path = path_join(toolchain_path, 'bin')
    name_fmt = abi_name + '-%s'

    sdk_cmake_dir = path_join(ANDROID_SDK_ROOT, 'cmake', get_android_cmake_version())
    if not isdir(sdk_cmake_dir):
        print('Android CMake directory \'%s\' not found' % sdk_cmake_dir)

    AR = path_join(tools_path, name_fmt % 'ar')
    AS = path_join(tools_path, name_fmt % 'as')
    CC = path_join(tools_path, name_fmt % 'clang')
    CXX = path_join(tools_path, name_fmt % 'clang++')
    DLLTOOL = ''
    LD = path_join(tools_path, name_fmt % 'ld')
    OBJDUMP = path_join(tools_path, name_fmt % 'objdump')
    RANLIB = path_join(tools_path, name_fmt % 'ranlib')
    CMAKE = path_join(sdk_cmake_dir, 'bin', 'cmake')
    STRIP = path_join(tools_path, name_fmt % 'strip')

    CPP = path_join(tools_path, name_fmt % 'cpp')
    if not isfile(CPP):
        CPP = path_join(tools_path, (name_fmt % 'clang'))
        CPP += ' -E'

    CXXCPP = path_join(tools_path, name_fmt % 'cpp')
    if not isfile(CXXCPP):
        CXXCPP = path_join(tools_path, (name_fmt % 'clang++'))
        CXXCPP += ' -E'

    ccache_path = os.getenv('CCACHE', '')
    if ccache_path:
        CC = '%s %s' % (ccache_path, CC)
        CXX = '%s %s' % (ccache_path, CXX)
        CPP = '%s %s' % (ccache_path, CPP)
        CXXCPP = '%s %s' % (ccache_path, CXXCPP)

    AC_VARS = [
        'mono_cv_uscore=yes',
        'ac_cv_func_sched_getaffinity=no',
        'ac_cv_func_sched_setaffinity=no',
        'ac_cv_func_shm_open_working_with_mmap=no'
    ]

    CFLAGS, CXXFLAGS, CPPFLAGS, CXXCPPFLAGS, LDFLAGS = [], [], [], [], []

    # On Android we use 'getApplicationInfo().nativeLibraryDir' as the libdir where Mono will look for shared objects.
    # This path looks something like this: '/data/app-lib/{package_name-{num}}'. However, Mono does some relocation
    # and the actual path it will look at will be '/data/app-lib/{package_name}-{num}/../lib', which doesn't exist.
    # Cannot use '/data/data/{package_name}/lib' either, as '/data/data/{package_name}/lib/../lib' may result in
    # permission denied. Therefore we just override 'MONO_RELOC_LIBDIR' here to avoid the relocation.
    CPPFLAGS += ['-DMONO_RELOC_LIBDIR=\\\".\\\"']

    CFLAGS += ['-fstack-protector']
    CFLAGS += ['-DMONODROID=1'] if WITH_MONODROID else []
    CFLAGS += ['-D__ANDROID_API__=' + api] if android_new_ndk else []
    CXXFLAGS += ['-fstack-protector']
    CXXFLAGS += ['-DMONODROID=1'] if WITH_MONODROID else []
    CXXFLAGS += ['-D__ANDROID_API__=' + api] if android_new_ndk else []

    CPPFLAGS += ['-I%s/sysroot/usr/include' % toolchain_path]
    CXXCPPFLAGS += ['-I%s/sysroot/usr/include' % toolchain_path]

    path_link = '%s/platforms/android-%s/arch-%s/usr/lib' % (ANDROID_NDK_ROOT, api, arch)

    LDFLAGS += [
        '-z', 'now', '-z', 'relro', '-z', 'noexecstack',
        '-ldl', '-lm', '-llog', '-lc', '-lgcc',
        '-Wl,-rpath-link=%s,-dynamic-linker=/system/bin/linker' % path_link,
        '-L' + path_link
    ]

    # Fixes this error: DllImport unable to load library 'dlopen failed: empty/missing DT_HASH in "libmono-native.so" (built with --hash-style=gnu?)'.
    LDFLAGS += ['-Wl,--hash-style=both']

    CONFIGURE_FLAGS = [
        '--disable-boehm',
        '--disable-executables',
        '--disable-iconv',
        '--disable-mcs-build',
        '--disable-nls',
        '--enable-dynamic-btls',
        '--enable-maintainer-mode',
        '--enable-minimal=ssa,portability,attach,verifier,full_messages,sgen_remset'
                ',sgen_marksweep_par,sgen_marksweep_fixed,sgen_marksweep_fixed_par'
                ',sgen_copying,logging,security,shared_handles,interpreter',
        '--with-btls-android-ndk=%s' % ANDROID_NDK_ROOT,
        '--with-btls-android-api=%s' % api,
    ]

    CONFIGURE_FLAGS += ['--enable-monodroid'] if WITH_MONODROID else []
    CONFIGURE_FLAGS += ['--with-btls-android-ndk-asm-workaround'] if android_new_ndk else []

    CONFIGURE_FLAGS += [
        '--with-btls-android-cmake-toolchain=%s/build/cmake/android.toolchain.cmake' % ANDROID_NDK_ROOT,
        '--with-sigaltstack=yes',
        '--with-tls=pthread',
        '--without-ikvm-native',
        '--disable-cooperative-suspend',
        '--disable-hybrid-suspend',
        '--disable-crash-reporting'
    ]

    env['_android-%s_AR' % target] = AR
    env['_android-%s_AS' % target] = AS
    env['_android-%s_CC' % target] = CC
    env['_android-%s_CXX' % target] = CXX
    env['_android-%s_CPP' % target] = CPP
    env['_android-%s_CXXCPP' % target] = CXXCPP
    env['_android-%s_DLLTOOL' % target] = DLLTOOL
    env['_android-%s_LD' % target] = LD
    env['_android-%s_OBJDUMP' % target] = OBJDUMP
    env['_android-%s_RANLIB' % target] = RANLIB
    env['_android-%s_CMAKE' % target] = CMAKE
    env['_android-%s_STRIP' % target] = STRIP

    env['_android-%s_AC_VARS' % target] = AC_VARS
    env['_android-%s_CFLAGS' % target] = CFLAGS
    env['_android-%s_CXXFLAGS' % target] = CXXFLAGS
    env['_android-%s_CPPFLAGS' % target] = CPPFLAGS
    env['_android-%s_CXXCPPFLAGS' % target] = CXXCPPFLAGS
    env['_android-%s_LDFLAGS' % target] = LDFLAGS
    env['_android-%s_CONFIGURE_FLAGS' % target] = CONFIGURE_FLAGS

    setup_runtime_template(env, 'android', target, host_triple)
Example #37
    parser = argparse.ArgumentParser(
        description="extract tppk files recursively")
    parser.add_argument(
        "-d",
        "--dir_path",
        help="directory path to extract",
        required=True,
    )
    parser.add_argument("-t",
                        "--tppk_tool_path",
                        help="TppkTool path",
                        required=True)

    args = parser.parse_args()

    all_files = glob.glob(path_join(args.dir_path, "**", "*"), recursive=True)
    for file in all_files:
        if isdir(file):
            continue
        with open(file, "rb") as f:
            magic_code = f.read(4)
            if magic_code != b"tppk":
                continue
            print(f"[-] Extract {file}")
            process = Popen(
                f"{args.tppk_tool_path} extract -o {file}_tppk_extracted {file}",
                stdout=PIPE,
                stderr=PIPE,
            )
            print_process(process, CONSOLE_ENCODING)
Example #38
from inspect import currentframe as inspect_currentframe, getfile as inspect_getfile
from os.path import (
    abspath as path_abspath,
    dirname as path_dirname,
    exists as path_exists,
    join as path_join,
    relpath as path_relpath,
)
from shutil import rmtree as shutil_rmtree
from sys import path as sys_path

from nose.tools import (ok_, raises as nose_raises)

SCRIPT_PATH = path_dirname(
    path_abspath(inspect_getfile(inspect_currentframe())))
PROJECT_ROOT = path_dirname(SCRIPT_PATH)

ROOT_PACKAGE_NAME = 'JqPyCharts'
ROOT_PACKAGE_PATH = path_join(PROJECT_ROOT, ROOT_PACKAGE_NAME)

sys_path.insert(0, PROJECT_ROOT)

from JqPyCharts.main_code import jqpc_simple_pie_chart
from JqPyCharts.utils import Err


def test_jqpc_simple_pie_chart_ok1():
    """ Tests: test_jqpc_simple_pie_chart_ok1
   """
    print('::: TEST: test_jqpc_simple_pie_chart_ok1()')

    scripts_pie_chart_path = path_join(
        SCRIPT_PATH, 'scripts_pie_chart_test_jqpc_simple_pie_chart_ok1')
Example #39
 def delete_known_hosts_file(cls):
     known_hosts = path_join(expanduser("~"), ".neo4j", "known_hosts")
     if isfile(known_hosts):
         remove(known_hosts)
Example #40
class IntegrationTestCase(TestCase):
    """ Base class for test cases that integrate with a server.
    """

    bolt_port = 7687
    bolt_address = ("localhost", bolt_port)

    bolt_uri = "bolt://%s:%d" % bolt_address
    bolt_routing_uri = "bolt+routing://%s:%d" % bolt_address

    user = NEO4J_USER or "neo4j"
    password = NEO4J_PASSWORD or "password"
    auth_token = (user, password)

    controller = None
    dist_path = path_join(dirname(__file__), "dist")
    run_path = path_join(dirname(__file__), "run")

    server_package = NEO4J_SERVER_PACKAGE
    local_server_package = path_join(
        dist_path, basename(server_package)) if server_package else None
    neoctrl_args = NEOCTRL_ARGS

    @classmethod
    def server_version_info(cls):
        with GraphDatabase.driver(cls.bolt_uri, auth=cls.auth_token) as driver:
            with driver.session() as session:
                full_version = session.run("RETURN 1").summary().server.version
                return ServerVersion.from_str(full_version)

    @classmethod
    def at_least_server_version(cls, major, minor):
        return cls.server_version_info().at_least_version(major, minor)

    @classmethod
    def protocol_version(cls):
        with GraphDatabase.driver(cls.bolt_uri, auth=cls.auth_token) as driver:
            with driver.session() as session:
                return session.run("RETURN 1").summary().protocol_version

    @classmethod
    def at_least_protocol_version(cls, version):
        return cls.protocol_version() >= version

    @classmethod
    def assert_supports_spatial_types(cls):
        if not cls.at_least_protocol_version(2):
            raise SkipTest("Spatial types require Bolt protocol v2 or above")

    @classmethod
    def assert_supports_temporal_types(cls):
        if not cls.at_least_protocol_version(2):
            raise SkipTest("Temporal types require Bolt protocol v2 or above")

    @classmethod
    def delete_known_hosts_file(cls):
        known_hosts = path_join(expanduser("~"), ".neo4j", "known_hosts")
        if isfile(known_hosts):
            remove(known_hosts)

    @classmethod
    def _unpack(cls, package):
        try:
            makedirs(cls.run_path)
        except OSError:
            pass
        controller_class = WindowsController if platform.system() == "Windows" else UnixController
        home = realpath(controller_class.extract(package, cls.run_path))
        return home

    @classmethod
    def _start_server(cls, home):
        controller_class = WindowsController if platform.system() == "Windows" else UnixController
        cls.controller = controller_class(home, 1)
        if NEO4J_USER is None:
            cls.controller.create_user(cls.user, cls.password)
            cls.controller.set_user_role(cls.user, "admin")
        cls.controller.start()

    @classmethod
    def _stop_server(cls):
        if cls.controller is not None:
            cls.controller.stop()
            if NEO4J_USER is None:
                pass  # TODO: delete user

    @classmethod
    def setUpClass(cls):
        if is_listening(cls.bolt_address):
            print("Using existing server listening on port {}\n".format(
                cls.bolt_port))
            with GraphDatabase.driver(cls.bolt_uri,
                                      auth=cls.auth_token) as driver:
                try:
                    with driver.session():
                        pass
                except AuthError as error:
                    raise RuntimeError("Failed to authenticate (%s)" % error)
        elif cls.server_package is not None:
            print("Using server from package {}\n".format(cls.server_package))
            package = copy_dist(cls.server_package, cls.local_server_package)
            home = cls._unpack(package)
            cls._start_server(home)
        elif cls.neoctrl_args is not None:
            print("Using boltkit to install server 'neoctrl-install {}'\n".
                  format(cls.neoctrl_args))
            edition = "enterprise" if "-e" in cls.neoctrl_args else "community"
            version = cls.neoctrl_args.split()[-1]
            home = _install(edition, version, cls.run_path)
            cls._start_server(home)
        else:
            raise SkipTest("No Neo4j server available for %s" % cls.__name__)

    @classmethod
    def tearDownClass(cls):
        cls._stop_server()
Example #41
def serve_js():
    return send_from_directory(path_join(app.root_path, "static"),
                               "materialize.js")
Example #42
def push_files():
    for local_relative_file, remote_relative_file in FILES_TO_PUSH:
        local_file = path_join(PUSHED_FILES_FOLDER, local_relative_file)
        remote_file = path_join(REMOTE_HOME_DIR, remote_relative_file)
        put(local_file, remote_file)
Example #43
def error_404(e):
    return send_from_directory(path_join(app.root_path, "static"), "404.html")
Example #44
def serve_custom_css():
    return send_from_directory(path_join(app.root_path, "static"),
                               "custom.css")
Example #45
 def get_metric_output_path(self, random_effect_name):
     """ Get metric output path. """
     output_dir = path_join(self.root_output_dir, random_effect_name)
     metric_output_path = path_join(output_dir, METRIC)
     return metric_output_path
Example #46
import glob
import os
import sys
from os.path import join as path_join
from os.path import sep as path_sep

from common import ProtocolError
from message import Messager

try:
    from config import BASE_DIR, WORK_DIR
except ImportError:
    # for CLI use; assume we're in brat server/src/ and config is in root
    from sys import path as sys_path
    from os.path import dirname
    sys_path.append(path_join(dirname(__file__), '../..'))
    from config import BASE_DIR, WORK_DIR

# Filename extension used for DB file.
SS_DB_FILENAME_EXTENSION = 'ss.db'

# Default similarity measure
DEFAULT_SIMILARITY_MEASURE = 'cosine'

# Default similarity threshold
DEFAULT_THRESHOLD = 0.7

# Length of n-grams in simstring DBs
DEFAULT_NGRAM_LENGTH = 3

# Whether to include marks for begins and ends of strings
Example #47
	def getRcPositions(self):
		return resolveFilename(SCOPE_SKIN, path_join("rc_models", self.getRcFolder(), "rcpositions.xml"))
Example #48
	def getRcLocation(self):
		return resolveFilename(SCOPE_SKIN, path_join("rc_models", self.getRcFolder(), ""))
Example #49
 def __init__(self, read_file=True, theme=None, vol=None, scrobble=None):
     self.config = configparser.ConfigParser(
         comment_prefixes=('#', ';'),
         inline_comment_prefixes=(';', ),
         strict=True)
     self.config.read_dict(
         OrderedDict([
             ('Server',
              OrderedDict([
                  ('host', '127.0.0.1'),
                  ('port', '7887'),
                  ('volume', '11000'),
                  ('scrobble', 'no'),
                  ('notify_logfile', ''),
                  ('update_btt_widget', 'no'),
              ])),
             (
                 'UI',
                 OrderedDict([
                     ('theme', 'auto'),
                     ('light_theme', 'light'),
                     ('dark_theme', 'miami_vice'),
                     ('fallback_theme', 'nocolor'),
                     ('confirm_banner_font', 'no'),
                     ('compact_titles', 'yes'),
                     ('figlet_banners', 'yes'),
                     ('figlet_fonts',
                      dedent("""\
                  3-d, 3x5, 5lineoblique, a_zooloo, acrobatic,
                  alligator, alligator2, alphabet, avatar, banner, banner3-D,
                  banner4, barbwire, basic, bell, big, bigchief, block, britebi,
                  broadway, bubble, bulbhead, calgphy2, caligraphy, catwalk,
                  charact1, charact4, chartri, chunky, clb6x10, coinstak, colossal,
                  computer, contessa, contrast, cosmic, cosmike, courbi, crawford,
                  cricket, cursive, cyberlarge, cybermedium, cybersmall, devilish,
                  diamond, digital, doh, doom, dotmatrix, double, drpepper,
                  dwhistled, eftichess, eftifont, eftipiti, eftirobot, eftitalic,
                  eftiwall, eftiwater, epic, fender, fourtops, fraktur, funky_dr,
                  fuzzy, goofy, gothic, graceful, graffiti, helvbi, hollywood,
                  home_pak, invita, isometric1, isometric2, isometric3, isometric4,
                  italic, ivrit, jazmine, jerusalem, kban, larry3d, lean, letters,
                  linux, lockergnome, madrid, marquee, maxfour, mike, mini, mirror,
                  moscow, mshebrew210, nancyj-fancy, nancyj-underlined, nancyj,
                  new_asci, nipples, ntgreek, nvscript, o8, odel_lak, ogre, os2,
                  pawp, peaks, pebbles, pepper, poison, puffy, rectangles, relief,
                  relief2, rev, roman, rounded, rowancap, rozzo, runic, runyc,
                  sansbi, sblood, sbookbi, script, serifcap, shadow, short, sketch_s,
                  slant, slide, slscript, small, smisome1, smkeyboard, smscript,
                  smshadow, smslant, smtengwar, speed, stacey, stampatello, standard,
                  starwars, stellar, stop, straight, t__of_ap, tanja, tengwar, thick,
                  thin, threepoint, ticks, ticksslant, tinker-toy, tombstone, trek,
                  tsalagi, twin_cob, twopoint, univers, usaflag, utopiabi, weird,
                  whimsy, xbritebi, xcourbi""")
                      ),  # line breaks MUST match __main__.config docstring
                     ('show_stream_ascii_art', 'yes'),
                 ])),
             ('theme_miami_vice',
              OrderedDict([
                  ('ui_banner', 'red'),
                  ('ui_names', 'yellow'),
                  ('ui_desc', 'green'),
                  ('stream_name_banner', 'yellow'),
                  ('stream_name_confirm', 'purple'),
                  ('meta_prefix_str', '>>>'),
                  ('meta_prefix_pad', '1'),
                  ('meta_prefix', 'blue'),
                  ('meta_stream_name', 'blue'),
                  ('meta_song_name', 'blue'),
                  ('stream_exit_confirm', 'purple'),
              ])),
             ('theme_light',
              OrderedDict([
                  ('ui_banner', 'purple'),
                  ('ui_names', 'blue'),
                  ('ui_desc', 'grey'),
                  ('stream_name_banner', 'grey'),
                  ('stream_name_confirm', 'purple'),
                  ('meta_prefix_str', '>>>'),
                  ('meta_prefix_pad', '1'),
                  ('meta_prefix', 'blue'),
                  ('meta_stream_name', 'blue'),
                  ('meta_song_name', 'blue'),
                  ('stream_exit_confirm', 'purple'),
              ])),
             ('theme_nocolor',
              OrderedDict([
                  ('ui_banner', 'endc'),
                  ('ui_names', 'endc'),
                  ('ui_desc', 'endc'),
                  ('stream_name_banner', 'endc'),
                  ('stream_name_confirm', 'endc'),
                  ('meta_prefix_str', '>>>'),
                  ('meta_prefix_pad', '1'),
                  ('meta_prefix', 'endc'),
                  ('meta_stream_name', 'endc'),
                  ('meta_song_name', 'endc'),
                  ('stream_exit_confirm', 'endc'),
              ])),
             ('Lastfm',
              OrderedDict([
                  ('api key', ''),
                  ('shared secret', ''),
                  ('username', ''),
                  ('password hash', ''),
              ])),
             ('BTT',
              OrderedDict([
                  ('widget UUID', ''),
                  ('shared secret', ''),
              ])),
         ]))
     home = expanduser('~')
     self.file = path_join(home, SETTINGS_FILE)
     if read_file:
         self.config.read([self.file])
     if theme is not None:
         self.config['UI']['theme'] = theme
     if vol is not None:
          self.config['Server']['volume'] = str(vol)  # 'volume' lives in [Server] (checked below)
     if scrobble is not None:
         if isinstance(scrobble, bool):
             if scrobble:
                 scrobble = 'yes'
             else:
                 scrobble = 'no'
         self.config['Server']['scrobble'] = str(scrobble)
     _check_volume(self.config['Server']['volume'])
     if not os.path.isfile(self.file):
         with open(self.file, 'w') as out_fh:
             self.config.write(out_fh)
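A minimal usage sketch for the settings class above (the class name "Config" is an assumption; the snippet does not name it). Constructor arguments override values read from the settings file, and the file is written out on first run:

# Hypothetical usage; "Config" stands in for the class defined above.
cfg = Config(read_file=True, theme='light', vol=9000, scrobble=True)
print(cfg.config['UI']['theme'])         # -> 'light'
print(cfg.config['Server']['scrobble'])  # -> 'yes'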
Example #50
	def getRcImg(self):
		return resolveFilename(SCOPE_SKIN, path_join("rc_models", self.getRcFolder(), "rc.png"))
Example #51
# numpy has a "nicer" fix:
# https://github.com/numpy/numpy/blob/master/numpy/distutils/ccompiler.py
OPT = get_config_vars('OPT')[0]
os.environ['OPT'] = " ".join(
    flag for flag in OPT.split() if flag != '-Wstrict-prototypes'
)

# We bundle tested versions of zlib & bzip2. To use the system zlib and bzip2
# change setup.cfg or use the `--libraries z,bz2` parameter which will make our
# custom build_ext command strip out the bundled versions.
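# For example, a build against the system libraries could be invoked as
# (illustrative; --libraries is a standard distutils build_ext option):
#     python setup.py build_ext --libraries z,bz2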

ZLIBDIR = 'third-party/zlib'
BZIP2DIR = 'third-party/bzip2'

BUILD_DEPENDS = []
BUILD_DEPENDS.extend(path_join("lib", bn + ".hh") for bn in [
    "khmer", "khmer_config", "kmer_hash", "hashtable", "counting",
    "hashbits", "labelhash"])

SOURCES = ["khmer/_khmermodule.cc"]
SOURCES.extend(path_join("lib", bn + ".cc") for bn in [
    "khmer_config", "thread_id_map", "trace_logger", "perf_metrics",
    "read_parsers", "kmer_hash", "hashtable", "hashbits", "labelhash",
    "counting", "subset", "read_aligner"])

EXTRA_COMPILE_ARGS = ['-O3']

if sys.platform == 'darwin':
    EXTRA_COMPILE_ARGS.extend(['-arch', 'x86_64'])  # force 64bit only builds

EXTENSION_MOD_DICT = \
Example #52
logger = logging.getLogger(__name__)


def log(msg, logger=logger):
    logger.info(LOGGER_PREFIX % msg)


TARGET = 'useful'

MODEL_FILE = './yelp-model-rcnn-char-big-{}'.format(TARGET)
LOG_FILE = './log-model-rcnn-char-big-{}'.format(TARGET)

log('Loading training data')

train_reviews = np.load(
    path_join(ROOT_PATH,
              'Yelp_{}_sentences_train_char_3votes_X.npy'.format(TARGET)))
train_labels = np.load(
    path_join(ROOT_PATH,
              'Yelp_{}_sentences_train_char_3votes_y.npy'.format(TARGET)))

log('Shuffling training data')
nb_samples = train_reviews.shape[0]
shuff = list(range(nb_samples))  # list() so the in-place shuffle also works under Python 3
np.random.shuffle(shuff)

train_reviews, train_labels = train_reviews[shuff].reshape(
    nb_samples, -1), train_labels[shuff]
del shuff

log('Loading testing data')
Example #53
from os.path import dirname, realpath, join as path_join

PROJECT_ROOT = dirname(dirname(realpath(__file__)))
APP_ROOT = path_join(PROJECT_ROOT, 'covid_app')

DATA_ROOT = path_join(PROJECT_ROOT, 'data')
Example #54
class GraphDatabaseServer(object):

    bolt_port = 7687
    bolt_address = ("localhost", bolt_port)

    bolt_uri = "bolt://%s:%d" % bolt_address
    bolt_routing_uri = "bolt+routing://%s:%d" % bolt_address

    user = NEO4J_USER or "test"
    password = NEO4J_PASSWORD or "test"
    auth_token = (user, password)

    controller = None
    dist_path = path_join(dirname(__file__), "dist")
    run_path = path_join(dirname(__file__), "run")

    server_package = NEO4J_SERVER_PACKAGE
    local_server_package = path_join(
        dist_path, basename(server_package)) if server_package else None

    @classmethod
    def server_version_info(cls):
        with GraphDatabase.driver(cls.bolt_uri, auth=cls.auth_token) as driver:
            with driver.session() as session:
                full_version = session.run("RETURN 1").summary().server.version
                return ServerVersion.from_str(full_version)

    @classmethod
    def at_least_version(cls, major, minor):
        return cls.server_version_info().at_least_version(major, minor)

    @classmethod
    def delete_known_hosts_file(cls):
        known_hosts = path_join(expanduser("~"), ".neo4j", "known_hosts")
        if isfile(known_hosts):
            remove(known_hosts)

    @classmethod
    def _start_server(cls, package):
        try:
            makedirs(cls.run_path)
        except OSError:
            pass
        if platform.system() == "Windows":
            controller_class = WindowsController
        else:
            controller_class = UnixController
        home = realpath(controller_class.extract(package, cls.run_path))
        cls.controller = controller_class(home, 1)
        if NEO4J_USER is None:
            cls.controller.create_user(cls.user, cls.password)
            cls.controller.set_user_role(cls.user, "admin")
        cls.controller.start()

    @classmethod
    def _stop_server(cls):
        if cls.controller is not None:
            cls.controller.stop()
            if NEO4J_USER is None:
                pass  # TODO: delete user

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.stop()

    @classmethod
    def start(cls):
        if is_listening(cls.bolt_address):
            stderr.write("Using existing server listening on port {}\n".format(
                cls.bolt_port))
            with GraphDatabase.driver(cls.bolt_uri,
                                      auth=cls.auth_token) as driver:
                try:
                    with driver.session():
                        pass
                except AuthError as error:
                    stderr.write("{}\n".format(error))
                    exit(1)
            return
        if cls.server_package is None:
            raise RuntimeError("No Neo4j server available for %s" %
                               cls.__name__)
        stderr.write("Using server from package {}\n".format(
            cls.server_package))
        package = copy_dist(cls.server_package, cls.local_server_package)
        cls._start_server(package)

    @classmethod
    def stop(cls):
        cls._stop_server()
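A usage sketch for the fixture above, assuming the same imports as the example and that either a server is already listening or NEO4J_SERVER_PACKAGE is configured (not from the source):

# Hypothetical: run a single query against whatever server the fixture provides.
with GraphDatabaseServer() as server:
    with GraphDatabase.driver(server.bolt_uri, auth=server.auth_token) as driver:
        with driver.session() as session:
            print(session.run("RETURN 1").single()[0])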
Example #55
    def for_2010():
        file_path = path_join(CDC_DATA_DIR, RESULTS_2010_FILE)
        extract = JamesJoyceRelayExtract(file_path)
        return extract
Example #56
def get_data_file_path(obj_dir):
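    # Return the lexicographically greatest filename in obj_dir (e.g. the most
    # recent timestamped file), or None when the directory is empty.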
    files = sorted(listdir(obj_dir), reverse=True)
    for filename in files:
        return path_join(obj_dir, filename)
Example #57
import sys
from os.path import join as path_join
from os.path import dirname


from sys import path as sys_path
sys_path.append(path_join(dirname(__file__), '../server/src'))

from ssplit import regex_sentence_boundary_gen

def _text_by_offsets_gen(text, offsets):
    for start, end in offsets:
        yield text[start:end]

def _normspace(s):
    import re
    return re.sub(r'\s', ' ', s)

def sentencebreaks_to_newlines(text):
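    # A line break is two characters wide in CRLF texts, one otherwise.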
    line_offset = 1
    if "\r\n" in text:
        line_offset = 2
    offsets = [o for o in regex_sentence_boundary_gen(text)]

    
    sentences = [s for s in _text_by_offsets_gen(text, offsets)]
Example #58
"""
https://github.com/AllenDowney/ThinkStats2/blob/9ee747/code/Apr25_27thAn_set1.shtml

Used with Think Stats 2e:
http://greenteapress.com/thinkstats2/html/thinkstats2004.html
"""
from os.path import join as path_join
import numpy as np

from cement_app.services.caching_service import cached_property
from cement_app.config.app import DATA_ROOT


#
# Constants
#
CDC_DATA_DIR = path_join(DATA_ROOT, 'races')
RESULTS_2010_FILE = '20100425-joyce-10k-results.shtml'


class JamesJoyceRelayExtract:
    #
    # Static Methods
    #
    @staticmethod
    def for_2010():
        file_path = path_join(CDC_DATA_DIR, RESULTS_2010_FILE)
        extract = JamesJoyceRelayExtract(file_path)
        return extract

    #
    # Properties
    #
Example #59
def gdmix_sparkjob_op(
        name,
        mainClass,
        arguments,
        secretName,
        mainApplicationFile="local:///opt/spark/jars/gdmix-data-all_2.11.jar",
        image="linkedin/gdmix",
        namespace="default",
        serviceAccount="default",
        driverCores=1,
        driverMemory='2g',
        executorCores=2,
        executorInstances=2,
        executorMemory='1g',
        sparkApplicationTimeoutMinutes=1440,
        deleteAfterDone=False):
    """
    This function prepares params for launch_sparkapplication.py as defined in
    the templates of ../launcher/sparkapplication/sparkapplication_component.yaml,
    which specifies to execute the launch_tfjob.py once the container is ready.
    In the container, the launch_tfjob.py assemble all params to form a deployable
    YAML file to launch the actual spark application.
    """
    componentPath = path_join(resource.__path__[0],
                              "sparkapplication_component.yaml")
    name_placeholder = "SparkApplication-launcher-name"
    spark_application_launcher_op = load_launcher_from_file(
        componentPath, name_placeholder, name)

    # Driver spec for the Spark application
    # Note: the secret is pre-created for now and stays valid for 7 days
    driverSpec = {
        "cores":
        driverCores,
        "memory":
        driverMemory,
        "secrets": [{
            "name": secretName,
            "path": f"/var/tmp/{secretName}",
            "secretType": "HadoopDelegationToken"
        }],
        "serviceAccount":
        serviceAccount
    }

    # Executor spec for the spark application
    executorSpec = {
        "cores": executorCores,
        "instances": executorInstances,
        "memory": executorMemory
    }

    return spark_application_launcher_op(
        name=name,
        namespace=namespace,
        image=image,
        main_class=mainClass,
        arguments=arguments,
        main_application_file=mainApplicationFile,
        driver_spec=driverSpec,
        executor_spec=executorSpec,
        sparkapplication_timeout_minutes=sparkApplicationTimeoutMinutes,
        delete_finished_sparkapplication=deleteAfterDone)
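An illustrative call; every value below is a placeholder rather than anything taken from the source:

# Hypothetical invocation with placeholder values.
metrics_op = gdmix_sparkjob_op(
    name="compute-auc",
    mainClass="com.linkedin.gdmix.evaluation.Evaluator",  # placeholder class
    arguments="--metricsInputDir /output/metric --outputMetricFile /output/auc",
    secretName="hadoop-token",
    executorInstances=4,
)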
Example #60
def reports(results_input, results_output):
    """Generate reports for EMSE paper."""
    now = pandas.Timestamp(2017, 9, 30, 12)
    df = pandas.read_csv(
        path_join(results_input, "results_with_coverage.csv"),
        parse_dates=[0, 10]
    )
    df_googleplay = pandas.read_csv(
        path_join(results_input, "googleplay.csv"),
        index_col='package'
    )
    df = df.join(df_googleplay, on="app_id")
    df_sonar = pandas.read_csv("results_sonar.csv", index_col='package')
    df_sonar.fillna(0, inplace=True)
    df_sonar = df_sonar.add_prefix('sonar_')
    df = df.join(df_sonar, on="app_id")

    # Feature engineering
    df['tests'] = df[unit_test_frameworks+ui_automation_frameworks+cloud_test_services].any(axis=1)
    df['no_tests'] = ~df['tests']
    df['unit_tests'] = df[unit_test_frameworks].apply(any, axis=1)
    df['ui_tests'] = df[ui_automation_frameworks].apply(any, axis=1)
    df["cloud_tests"] = df[cloud_test_services].apply(any, axis=1)
    df["ci/cd"] = df[ci_services].apply(any, axis=1)
    df['age'] = (now - df['created_at'])
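    # '<m8[Y]' converts the timedelta to whole years before the integer cast.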
    df['age_numeric'] = (now - df['created_at']).astype('<m8[Y]').astype('int')
    df['time_since_last_update'] = (now - df['last_updated'])
    df['time_since_last_update_numeric'] = df['time_since_last_update'].astype('<m8[Y]').astype('int')
    df_old = df[df['age_numeric']>=2]
    df["downloads"] = df["downloads"].astype("category", categories=downloads_scale, ordered=True)
    df['sonar_issues_ratio'] = df['sonar_issues'].divide(df['sonar_files_processed'])
    df['sonar_blocker_issues_ratio'] = df['sonar_blocker_issues'].divide(df['sonar_files_processed'])
    df['sonar_critical_issues_ratio'] = df['sonar_critical_issues'].divide(df['sonar_files_processed'])
    df['sonar_major_issues_ratio'] = df['sonar_major_issues'].divide(df['sonar_files_processed'])
    df['sonar_minor_issues_ratio'] = df['sonar_minor_issues'].divide(df['sonar_files_processed'])
    df_with_google_data = df[~df["rating_count"].isnull()]
    df_with_tests = df[df['tests']]
    df_without_tests = df[~df['tests']]
    df.to_csv("results_merged.csv")


    # from android_test_inspector.corr_analysis import correlation_matrix
    # correlation_matrix(df, output_file=path_join(results_output, "corr_matrix.pdf"))

    colors_dict = {
        'any': 'C0',
        'unit_test_frameworks': 'C1',
        'ui_automation_frameworks': 'C2',
        'cloud_test_services': 'C3',
        'ci_services': 'C4',
    }

    marker_dict = {
        'any': 'o',
        'unit_test_frameworks': 'v',
        'ui_automation_frameworks': '*',
        'cloud_test_services': 'H',
        'ci_services': 's',
    }

    linestyle_dict = {
        'any': '-',
        'unit_test_frameworks': ':',
        'ui_automation_frameworks': '--',
        'cloud_test_services': '-.',
    }

    # --- Number of projects by year --- #
    figure, ax = plt.subplots(figsize=(4, 2.5))
    df.groupby('age_numeric')['age_numeric'].count().plot.bar(
        color='black',
        width=0.25,
        ax=ax,
    )
    ax.tick_params(direction='out', top='off')
    ax.set_xlabel("Age")
    ax.set_ylabel("Number of apps")
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.spines['left'].set_visible(False)
    ax.yaxis.grid(linestyle='dotted')
    figure.tight_layout()
    figure.savefig(path_join(results_output, "app_age_count.pdf"))

    # --- Number of projects by framework --- #
    columns = (
        ['tests']
        + ['unit_tests'] + unit_test_frameworks
        + ['ui_tests'] + ui_automation_frameworks
        + ['cloud_tests'] + cloud_test_services
        # + ['ci/cd'] + ci_services
    )
    colors =  (
        [colors_dict['any']] +
        [colors_dict['unit_test_frameworks']] * (len(unit_test_frameworks) + 1)
        + [colors_dict['ui_automation_frameworks']] * (len(ui_automation_frameworks) + 1)
        + [colors_dict['cloud_test_services']] * (len(cloud_test_services) + 1)
        + [colors_dict['ci_services']] * (len(ci_services) + 1)
    )

    highlights = [
        'tests',
        'unit_tests',
        'ui_tests',
        'cloud_tests',
        'ci/cd',
    ]
    sums = df[columns].sum()
    labels = (label in highlights and "• All "+label or label for label in columns)
    labels = [label.title().replace("_", " ") for label in labels]
    heights = sums.values
    figure, ax = plt.subplots(1, 1)
    ax.bar(
        range(len(labels)),
        heights,
        0.5,
        color=colors,
        edgecolor = 'k',
        linewidth= [column in highlights and 0.9 or 0.0 for column in columns]
    )
    ax.set_xticklabels(labels, rotation='vertical')
    ax.set_xticks(range(len(labels)))
    ax.tick_params(direction='out', top='off')
    # ax.set_title("Number of projects by test framework")
    ax.set_ylabel("Number of projects (out of {})".format(len(df.index)))
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.spines['left'].set_visible(False)
    ax.yaxis.grid(linestyle='dotted')

    # ax2 = ax.twinx()
    # ax2.grid(False)
    # ax2.set_ylim(ax.get_ylim())
    # ax2.set_yticklabels(["{:.0%}".format(tick/len(df)) for tick in ax2.get_yticks()])
    # ax2.spines['right'].set_visible(False)
    # ax2.spines['top'].set_visible(False)
    # ax2.spines['left'].set_visible(False)
    # ax2.set_ylabel("Percentage of projects")

    def draw_range(ax, xmin, xmax, label):
        y=400
        ax.annotate('', xy=(xmin, y), xytext=(xmax, y), xycoords='data', textcoords='data',
                    arrowprops={'arrowstyle': '|-|', 'color':'black', 'linewidth': 0.5})
        xcenter = xmin + (xmax-xmin)/2
        ytext = y + ( ax.get_ylim()[1] - ax.get_ylim()[0] ) / 22
        ax.annotate(label, xy=(xcenter,ytext), ha='center', va='center', fontsize=9)

    draw_range(ax, 0.5, 5.5, "Unit testing")
    draw_range(ax, 5.5, 14.5, "GUI testing")
    draw_range(ax, 14.5, 21.5, "Cloud testing")
    # draw_range(ax, 21.5, 26.5, "CI/CD")

    figure.tight_layout()
    figure.savefig(path_join(results_output, "framework_count.pdf"))
    # --------------------------------------- #

    # --- Percentage of Android tests over the age of the apps --- #
    def tests_in_projects_by_time_of_creation(df_projects, frameworks, label=None,
                                              title=None,
                                              zorder=1, color=None,
                                              verbose=False, **kwargs):
        portions = []
        n_projects_with_tests_history = []
        total_projects_history = []
        age_max = df_projects['age_numeric'].max()+1
        for age in range(age_max):
            n_projects_with_tests = df_projects[df_projects['age_numeric']==age][frameworks].apply(any, axis=1).sum()
            n_projects_with_tests_history.append(n_projects_with_tests)
            total_projects = len(df_projects[df_projects['age_numeric']==age].index)
            total_projects_history.append(total_projects)
            if total_projects == 0:
                portion = 0
            else:
                portion = n_projects_with_tests/total_projects
            portions.append(portion)
            if verbose:
                print("Age {}:".format(age))
                print("{} out of {} projects ({:.1%}).".format(n_projects_with_tests, total_projects, portion))

        plt.plot(range(age_max), portions, label=label, zorder=zorder, **kwargs)
        plt.scatter(range(age_max), portions, total_projects_history, marker='o', linewidth=1, zorder=zorder)
        ax = plt.gca()
        ax.spines['right'].set_visible(False)
        ax.spines['top'].set_visible(False)
        ax.spines['left'].set_visible(False)
        ax.set_xticks(range(age_max))
        ax.set_yticklabels(["{:.0%}".format(label) for label in ax.get_yticks()])
        ax.set_ylabel("Percentage of projects")
        ax.yaxis.grid(linestyle='dotted', color='gray')
        if label:
            legend = ax.legend(loc='upper center', shadow=False)
        if title:
            ax.set_title(title)

    figure, ax = plt.subplots(1,1)
    tests_in_projects_by_time_of_creation(df, unit_test_frameworks+ui_automation_frameworks+cloud_test_services, label="Any", color=colors_dict['any'], zorder=2, linestyle=linestyle_dict['any'])
    tests_in_projects_by_time_of_creation(df, unit_test_frameworks, label="Unit testing", color=colors_dict['unit_test_frameworks'], zorder=3, linestyle=linestyle_dict['unit_test_frameworks'])
    tests_in_projects_by_time_of_creation(df, ui_automation_frameworks, label="GUI testing", color=colors_dict['ui_automation_frameworks'], zorder=4, linestyle=linestyle_dict['ui_automation_frameworks'])
    tests_in_projects_by_time_of_creation(df, cloud_test_services, label="Cloud testing", color=colors_dict['cloud_test_services'], zorder=5, linestyle=linestyle_dict['cloud_test_services'])

    ax.set_xlabel("Years since first commit")
    ax.axvspan(0,2, color='darkgreen', alpha=0.1)
    figure.tight_layout()
    figure.savefig(path_join(results_output, "tests_by_age.pdf"))
    ax.invert_xaxis()
    figure.savefig(path_join(results_output, "tests_by_age_i.pdf"))
    # ------------------------------------------------------------ #

    # --- Percentage of Android tests over the age of the apps (cumulated) --- #
    def tests_in_projects_by_time_of_creation_cumm(df_projects, frameworks,
                                                   title=None, verbose=False, **kwargs):
        project_with_test_per_age = []
        total_projects_per_age = []
        n_projects_with_tests_history = []
        total_projects_history = []
        age_max = df_projects['age_numeric'].max()+1
        for age in range(age_max)[::-1]:
            n_projects_with_tests = df_projects[df_projects['age_numeric']==age][frameworks].apply(any, axis=1).sum()
            n_projects_with_tests_history.append(n_projects_with_tests)
            total_projects = len(df_projects[df_projects['age_numeric']==age].index)
            total_projects_history.append(total_projects)
            project_with_test_per_age.append(n_projects_with_tests)
            total_projects_per_age.append(total_projects)
            if verbose:
                portion = n_projects_with_tests / total_projects if total_projects else 0
                print("Age {}:".format(age))
                print("{} out of {} projects ({:.1%}).".format(n_projects_with_tests, total_projects, portion))
        project_with_test_per_age_cum = [sum(project_with_test_per_age[:index+1]) for index in range(len(project_with_test_per_age))]
        total_projects_per_age_cum = [sum(total_projects_per_age[:index+1]) for index in range(len(total_projects_per_age))]
        portions = []
        for with_tests, total in zip(project_with_test_per_age_cum, total_projects_per_age_cum):
            if total > 0:
                portions.append(with_tests/len(df_projects))
            else:
                portions.append(0)
        plt.plot(range(age_max)[::-1], portions, **kwargs)
        # plt.scatter(range(age_max)[::-1], portions, total_projects_history, marker='o', linewidth=1, zorder=kwargs.get('zorder'))
        plt.scatter(range(age_max)[::-1], portions, marker='.', linewidth=1, zorder=kwargs.get('zorder'))
        ax = plt.gca()
        ax.spines['right'].set_visible(False)
        ax.spines['top'].set_visible(False)
        ax.spines['left'].set_visible(False)
        ax.set_xticks(range(age_max))
        ax.set_yticklabels(["{:.0%}".format(label) for label in ax.get_yticks()])
        ax.set_ylabel("Percentage of projects")
        ax.yaxis.grid(linestyle='dotted', color='gray')
        ax.legend(loc='upper center', shadow=False)
        if title:
            ax.set_title(title)

    figure, ax = plt.subplots(1,1)
    tests_in_projects_by_time_of_creation_cumm(
        df,
        unit_test_frameworks+ui_automation_frameworks+cloud_test_services,
        label="Any", color=colors_dict['any'], zorder=2,
        linestyle=linestyle_dict['any'],
    )
    tests_in_projects_by_time_of_creation_cumm(
        df,
        unit_test_frameworks,
        label="Unit testing", color=colors_dict['unit_test_frameworks'], zorder=3,
        linestyle=linestyle_dict['unit_test_frameworks'],
    )
    tests_in_projects_by_time_of_creation_cumm(
        df,
        ui_automation_frameworks,
        label="GUI testing", color=colors_dict['ui_automation_frameworks'], zorder=4,
        linestyle=linestyle_dict['ui_automation_frameworks'],
    )
    tests_in_projects_by_time_of_creation_cumm(
        df,
        cloud_test_services,
        label="Cloud testing", color=colors_dict['cloud_test_services'], zorder=5,
        linestyle=linestyle_dict['cloud_test_services'],
    )
    ax.set_xlabel("Year")
    ax.axvspan(0,2, color='darkgreen', alpha=0.1)
    figure.tight_layout()
    figure.savefig(path_join(results_output, "tests_by_age_cumm.pdf"))
    ax.invert_xaxis()
    figure.savefig(path_join(results_output, "tests_by_age_cumm_i.pdf"))
    # ------------------------------------------------------------ #



    # --- Percentage of 2+years apps with tests grouped by time since last update --- #
    def tests_in_projects_by_time_of_update(df_projects, frameworks, label=None,
                                              title=None,
                                              verbose=False, zorder=None, color=None, **kwargs):
        portions = []
        n_projects_with_tests_history = []
        total_projects_history = []
        age_max = df_projects['time_since_last_update_numeric'].max()+1
        for age in range(age_max):
            n_projects_with_tests = df_projects[df_projects['time_since_last_update_numeric']==age][frameworks].apply(any, axis=1).sum()
            n_projects_with_tests_history.append(n_projects_with_tests)
            total_projects = len(df_projects[df_projects['time_since_last_update_numeric']==age].index)
            total_projects_history.append(total_projects)
            if total_projects == 0:
                portion = 0
            else:
                portion = n_projects_with_tests/total_projects
            portions.append(portion)
            if verbose:
                print("Age {}:".format(age))
                print("{} out of {} projects ({:.1%}).".format(n_projects_with_tests, total_projects, portion))

        plt.plot(range(age_max), portions, label=label, zorder=zorder, **kwargs)
        plt.scatter(range(age_max), portions, total_projects_history, marker='o', linewidth=1, zorder=zorder)
        ax = plt.gca()
        ax.spines['right'].set_visible(False)
        ax.spines['top'].set_visible(False)
        ax.spines['left'].set_visible(False)
        ax.spines['bottom'].set_visible(True)
        ax.set_xticks(range(age_max))
        ax.set_yticklabels(["{:.0%}".format(label) for label in ax.get_yticks()])
        ax.set_ylabel("Percentage of projects")
        ax.yaxis.grid(linestyle='dotted', color='gray')

        if label:
            legend = ax.legend(loc='upper center', shadow=False)
        if title:
            plt.title(title)

    figure, ax = plt.subplots(1,1)
    tests_in_projects_by_time_of_update(df_old, unit_test_frameworks+ui_automation_frameworks+cloud_test_services, label="Any", color=colors_dict['any'], linestyle=linestyle_dict['any'], zorder=1)
    tests_in_projects_by_time_of_update(df_old, unit_test_frameworks, label="Unit testing", color=colors_dict['unit_test_frameworks'], linestyle=linestyle_dict['unit_test_frameworks'], zorder=2)
    tests_in_projects_by_time_of_update(df_old, ui_automation_frameworks, label="GUI testing", color=colors_dict['ui_automation_frameworks'], linestyle=linestyle_dict['ui_automation_frameworks'], zorder=3)
    tests_in_projects_by_time_of_update(df_old, cloud_test_services, label="Cloud testing", color=colors_dict['cloud_test_services'], linestyle=linestyle_dict['cloud_test_services'], zorder=4)
    ax.set_xlabel("Years since last update")
    figure.tight_layout()
    figure.savefig(path_join(results_output, "mature_tests_by_update.pdf"))
    ax.invert_xaxis()
    figure.savefig(path_join(results_output, "mature_tests_by_update_i.pdf"))

    # ------------------------------------------------------------------------------- #

    # --- Descriptive stats for popularity metrics --- #
    dictionary = {
        "count": "$N$",
        "mean": "$\\bar{x}$",
        "std": "$s$",
        "min": "$min$",
        "max": "$max$",
        "rating_value": "Rating"
    }
    metrics = ['stars','forks', 'contributors', 'commits', 'rating_value', 'rating_count']


    def outliers_modified_z_score(ys):
        threshold = 3.5

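        # 0.6745 is the 0.75 quantile of the standard normal; scaling by it makes
        # the MAD-based score comparable to an ordinary z-score.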
        median_y = np.median(ys)
        median_absolute_deviation_y = np.median([np.abs(y - median_y) for y in ys])
        modified_z_scores = [0.6745 * (y - median_y) / median_absolute_deviation_y
                             for y in ys]
        return (np.abs(modified_z_scores) > threshold)

    def outliers_z_score(ys):
        return np.abs(zscore(ys)) < 3

    def remove_outliers_df(df, metric):
        df = df.dropna(subset=[metric])
        return df[outliers_z_score(df[metric])]


    def remove_outliers(series):
        series = series[~series.isnull()]
        return series[outliers_z_score(series)]
        # return series[np.abs(zscore(series)) < 3]

    def _descriptive_stats(series, ):
        return (
            series.count(),
            series.mean(),
            series.std(),
            series.min(),
            series.quantile(0.25),
            series.median(),
            series.quantile(0.75),
            series.max(),
            shapiro(series)[1] < 0.01 and "$p < 0.01$",
        )

    stats = []
    for metric in metrics:
        metric_title = metric.title().replace("_", " ")
        df_tmp = remove_outliers_df(df, metric)
        df_tmp_tests = df_tmp[df_tmp['tests']]
        stats.append((
            f"\\multirow{{2}}{{*}}{{{metric_title}}}",
            '$W$',
            *_descriptive_stats(df_tmp_tests[metric])
        ))
        df_tmp_wo_tests = df_tmp[~df_tmp['tests']]
        stats.append((
            "",
            '$WO$',
            *_descriptive_stats(df_tmp_wo_tests[metric])
        ))
    old_escape_rules = T.LATEX_ESCAPE_RULES
    T.LATEX_ESCAPE_RULES = {'%': '\\%'}
    table = tabulate(
        stats,
        headers=['', 'Tests', '$N$', '$\\bar{x}$', '$s$', '$min$', '$25%$', '$Md$', '$75%$', '$max$', '$X \\sim N$'],
        # showindex=issues_column,
        tablefmt='latex',
        floatfmt=".1f",
    )
    T.LATEX_ESCAPE_RULES = old_escape_rules
    with open(path_join(results_output, "popularity_metrics_stats_2.tex"), 'w') as f:
        f.write(table)

    stats = pandas.concat([remove_outliers(df[metric]).describe() for metric in metrics], axis=1)
    stats = stats.applymap((lambda x: "${:.1f}$".format(float(x)))).astype(str)
    stats[['stars','forks', 'contributors', 'commits', 'rating_count']] = stats[['stars','forks', 'contributors', 'commits', 'rating_count']].applymap((lambda x: "${:.0f}$".format(float(x[1:-1])))).astype(str)
    stats.loc['count']= stats.loc['count'].map((lambda x: "${:.0f}$".format(float(x[1:-1])))).astype(str)

    old_escape_rules = T.LATEX_ESCAPE_RULES
    T.LATEX_ESCAPE_RULES = {'%': '\\%'}
    with open(path_join(results_output, "popularity_metrics_stats.tex"), 'w') as f:
        f.write(tabulate(
            stats,
            headers=[dictionary.get(column, column.title().replace("_", " ")) for column in stats.columns],
            showindex=[dictionary.get(name, name) for name in stats.index],
            tablefmt='latex',
            floatfmt=".1f"
        ))
    T.LATEX_ESCAPE_RULES = old_escape_rules
    ### Box plots instead
    figure, axes = plt.subplots(2, 3)
    for index, ax, metric in zip(range(len(metrics)), [ax for subaxes in axes for ax in subaxes], metrics):
        values = remove_outliers(df[metric])
        metric_title = metric.title().replace("_", " ")
        ax.boxplot(values, whis=[5,95], showmeans=True, meanline=True,showfliers=True)
        ax.set_xticklabels([metric_title])
        ax.spines['right'].set_visible(False)
        ax.spines['top'].set_visible(False)
        ax.spines['left'].set_visible(False)
        ax.spines['bottom'].set_visible(True)
        ax.yaxis.grid(linestyle='dotted', color='gray')
        if index != 4:
            ax.set_yscale('log')
            ax.yaxis.set_major_formatter(ticker.FuncFormatter(lambda y, _: '{:g}'.format(y)))
        figure.tight_layout()
    figure.savefig(path_join(results_output, f"popularity_metrics_boxplot.pdf"))

    # -------------------------------------------------- #

    # --- Histogram for downloads --- #
    downloads_distribution = df_with_google_data.groupby('downloads')['downloads'].count()
    heights = df_with_google_data.groupby('downloads')['downloads'].count().values


    figure, ax = plt.subplots(1,1)
    labels = [
        str(human_format(int(cat.split(' - ')[0].replace(',',''))))
        + " – " +
        str(human_format(int(cat.split(' - ')[1].replace(',',''))))
        for cat in downloads_scale
    ]
    # ax.bar(
    #     range(len(labels)),
    #     heights,
    #     width=0.9,
    #     color=[column == '10,000 - 50,000' and 'C1' or 'C0' for column in downloads_scale],
    # )
    downloads_distribution.plot.bar(
        ax=ax,
        width=0.9,
        fontsize=14,
    )
    ax.set_xticklabels(labels, fontsize=14, rotation='vertical')
    ax.set_xlabel("Downloads", fontsize=15)
    ax.set_ylabel("Number of apps (out of {})".format(len(df.index)), fontsize=15)
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.spines['left'].set_visible(False)
    ax.spines['bottom'].set_visible(True)
    ax.yaxis.grid(linestyle='dotted', color='gray')

    # ax2 = ax.twinx()
    # ax2.grid(False)
    # ax2.set_ylim(ax.get_ylim())
    # ax2.set_yticklabels(["{:.0%}".format(tick/len(df_with_google_data)) for tick in ax2.get_yticks()], fontsize=14)
    # ax2.spines['right'].set_visible(False)
    # ax2.spines['top'].set_visible(False)
    # ax2.spines['left'].set_visible(False)
    # ax2.set_ylabel("Percentage of apps", fontsize=15)


    figure.tight_layout()
    figure.savefig(path_join(results_output, "downloads_hist.pdf"))
    # -------------------------------------------------- #

    # ---------- Hypothesis testing ------------- #
    popularity_metrics = [
        'stars',
        'forks',
        'contributors',
        'commits',
        'rating_value',
        'rating_count',
        # 'downloads'
    ]

    def cohen_d(y,x):
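        # Cohen's d: mean difference divided by the pooled standard deviation.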
        nx = len(x)
        ny = len(y)
        dof = nx + ny - 2
        return (np.mean(x) - np.mean(y)) / np.sqrt(((nx-1)*np.std(x, ddof=1) ** 2 + (ny-1)*np.std(y, ddof=1) ** 2) / dof)

    def analyze_populations(a,b, continuous=True):
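        # Two-sample comparison: KS test for continuous data, Mann-Whitney U
        # otherwise, plus location differences and effect sizes (CLES, Cohen's d).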
        mean_difference = np.mean(b) - np.mean(a)
        median_difference = np.median(b) - np.median(a)
        improvement = mean_difference/np.mean(b)
        ks_test, ks_p = ks_2samp(a,b)
        mwu_test, mwu_p = mannwhitneyu(a,b, alternative='two-sided')

        return {
            # 'MW': "${:.4f}$".format(mwu_p),
            # 'KS': continuous and "${:.4f}$".format(ks_p) or "n.a.",
            'Test': continuous and "${:,.0f}$".format(ks_test) or "${:,.0f}$".format(mwu_test),
            '$p$-value': continuous and ks_p or mwu_p,
            '$\\Delta\\bar{x}$': "${:,.2f}$".format(mean_difference),
            '$\\Delta Md$': "${:,.2f}$".format(median_difference),
            'CL (%)': f"${cles(a,b):,.2%}$",
            'Cohen\'s $d$': f"${cohen_d(a,b):,.4f}$",
            '$d_r$': "${:.1%}$".format(improvement),
        }

    tests = []
    for metric in popularity_metrics:
        df_wo_outliers = remove_outliers_df(df, metric)
        tests.append(
            analyze_populations(
                df_wo_outliers[~df_wo_outliers['tests']][metric],
                df_wo_outliers[df_wo_outliers['tests']][metric],
                False
            )
        )

    # Apply multiple-test correction (Benjamini-Hochberg FDR, method='fdr_bh')
    pvalues = [test['$p$-value'] for test in tests]
    _,pvalues,*_ = multipletests(pvalues, alpha=0.05, method='fdr_bh')
    for test, pvalue in zip(tests, pvalues):
        test['$p$-value'] = "${:.4f}$".format(pvalue)


    old_escape_rules = T.LATEX_ESCAPE_RULES
    T.LATEX_ESCAPE_RULES = {'%': '\\%'}
    with open(path_join(results_output, "popularity_metrics_test.tex"), 'w') as f:
        f.write(tabulate(
            tests,
            headers='keys',
            showindex=[metric.title().replace("_"," ") for metric in popularity_metrics],
            tablefmt='latex',

        ))
    T.LATEX_ESCAPE_RULES = old_escape_rules
    # ------------------------------------------- #

    # ---------- Tests vs Rating with Rating count ------------- #
    x = range(0, 10000 , 100)
    y_with_tests = tuple(df_with_tests[df_with_tests['rating_count']>i]['rating_value'].mean() for i in x)
    y_without_tests = tuple(df_without_tests[df_without_tests['rating_count']>i]['rating_value'].mean() for i in x)

    figure, ax = plt.subplots()
    ax.scatter(x, y_with_tests, marker='o', color='C0', label="With tests", zorder=2)
    ax.plot(x, y_with_tests, alpha=0.5, color='C0', zorder=1)
    ax.scatter(x, y_without_tests, marker='2', color='r', label="Without tests", zorder=2)
    ax.plot(x, y_without_tests, alpha=0.5, color='r', zorder=1)
    ax.legend(loc='upper center')

    ax.set_ylabel("Rating")
    ax.set_xlabel("Rating count >")
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)

    figure.tight_layout()
    figure.savefig(path_join(results_output, "rating_with_lower_limit.pdf"))
    # --------------------------------------------------------- #

    # ------------------ CI/CD platforms hist --------------- #

    figure, ax = plt.subplots()
    namepedia={
        "circleci": "Circle CI",
        "travis": "Travis CI",
    }
    df[['ci/cd']+ci_services].sum().plot.bar(
        fontsize=15, edgecolor = 'k', color='black', width=0.25, linewidth = [1]+[0]*len(ci_services)
    )
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.spines['left'].set_visible(False)
    ax.yaxis.grid(linestyle='dotted', color='gray')
    ax.set_ylabel("Number of apps (out of {})".format(len(df.index)), fontsize=15)
    ax.set_xticklabels(["All"]+[namepedia.get(key, key.title().replace('_', ' ')) for key in ci_services])

    # ax2 = ax.twinx()
    # ax2.grid(False)
    # ax2.set_ylim(ax.get_ylim())
    # ax2.set_yticklabels(["{:.0%}".format(tick/len(df)) for tick in ax2.get_yticks()], fontsize=15)
    # ax2.spines['right'].set_visible(False)
    # ax2.spines['top'].set_visible(False)
    # ax2.spines['left'].set_visible(False)
    # ax2.set_ylabel("Percentage of apps", fontsize=15)

    for p in ax.patches:
        ax.annotate("{:.0f}".format(p.get_height()), (p.get_x() +p.get_width()/2, p.get_height()+4), ha='center', fontsize=14)
    figure.tight_layout()
    figure.savefig(path_join(results_output, "ci_cd_hist.pdf"))
    # ------------------------------------------------------- #

    # ---------------- Mosaic CI/CD ---------------- #
    from statsmodels.graphics.mosaicplot import mosaic
    def properties(keys):
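        # Mosaic cell colors: green = tests and CI/CD, yellow = one of the two,
        # red = neither.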
        keys = list(map(lambda i: i == 'True', keys))
        if all(keys):
            return {'color': 'lightgreen'}
        elif any(keys):
            return {'color': 'lightgoldenrodyellow'}
        return {'color': 'lightcoral'}

    figure, ax  = plt.subplots(figsize=(4.5,3.5))
    labelizer = lambda k: {
        ('False','False'): 'A. No Tests and no CI/CD\n({:.1%})'.format(1 - df[["tests", "ci/cd"]].any(axis=1).sum()/len(df)),
        ('True','False'): 'B. With Tests but\nno CI/CD\n({:.1%})'.format(sum(df["tests"] & ~df["ci/cd"])/len(df)),
        ('False','True'): 'C. No Tests but with CI/CD\n({:.1%})'.format(sum(~df["tests"] & df["ci/cd"])/len(df)),
        ('True','True'): 'D. With Tests and\nwith CI/CD\n({:.1%})'.format(df[["tests", "ci/cd"]].all(axis=1).sum()/len(df)),
    }.get(k, k)

    mosaic(df, ["tests", "ci/cd"], properties= properties, labelizer=labelizer, ax=ax)
    ax.set_xticklabels(['No tests', 'With tests'])
    ax.set_yticklabels(['With CI/CD', 'No CI/CD'])
    # ax.spines['left'].linewidth = 1
    # ax.spines['top'].linewidth = 1
    # ax.spines['right'].linewidth = 1
    # ax.spines['bottom'].linewidth = 1
    ax.invert_yaxis()
    figure.tight_layout()
    figure.savefig(path_join(results_output, "ci_cd_mosaic.pdf"))

    obs = [
        [sum(~df["tests"] & df["ci/cd"]), sum(~df["tests"] & ~df["ci/cd"])], #No tests
        [sum(df["tests"] & df["ci/cd"]), sum(df["tests"] & ~df["ci/cd"])] #Tests
    ]
    chi,pvalue,dof,_ = chi2_contingency(obs)
    print("Relationship between Ci/CD and Automated testing:")
    print("Chi={}, dof={}, p={}".format(chi, dof, pvalue))
    # ------------------------------------------------------- #

    # ------------------ Sonar vs tests --------------- #
    features = [
        # 'sonar_issues_ratio',
        'sonar_blocker_issues_ratio',
        'sonar_critical_issues_ratio',
        'sonar_major_issues_ratio',
        'sonar_minor_issues_ratio'
    ]
    names = [
        # 'Any',
        'Blocker',
        'Critical',
        'Major',
        'Minor'
    ]
    options = {
        'sym':       '',
        'meanline':  True,
        'showmeans': True,
        'patch_artist': True,
    }

    figure, ax = plt.subplots(1,1)
    boxplot = ax.boxplot(
        [
            df_tmp[feature].dropna().values
            for feature in features
            for df_tmp in (df_with_tests, df_without_tests)
        ],
        labels=(
            'With Tests',
            'Without Tests'
        )*len(features),
        **options
    )

    colors = (
        'C0',
        'darkred'
    )*len(features)
    hatches = (
        '/',
        ''
    )*len(features)
    for patch, color, hatch in zip(boxplot['boxes'], colors, hatches):
        patch.set_edgecolor(color)
        patch.set_facecolor((1,1,1,0.8))
        patch.set_hatch(hatch)
        patch.set_alpha(0.9)
    for cap, whisker, color in zip(boxplot['caps'], boxplot['whiskers'], np.repeat(colors,2)):
        cap.set_color(color)
        whisker.set_color(color)

    # legend
    circ1 = mpatches.Patch(facecolor='white', edgecolor=colors[0], hatch=hatches[0], label='With Tests')
    circ2 = mpatches.Patch(facecolor='white', edgecolor=colors[1], hatch=hatches[1], label='Without Tests')
    ax.legend(handles=(circ1,circ2), facecolor='white')
    # -----

    ax.yaxis.grid(linestyle='dotted', color='gray')
    ax.set_xticklabels(names)
    xticks = np.arange(1.5, len(features)*2+0.5, 2)
    ax.set_xticks(xticks)
    ax.set_ylabel('Number of issues per file')
    ax.set_xlabel('Severity of issues')

    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.spines['left'].set_visible(False)


    mean_differences = [
        df_without_tests[feature].dropna().mean() -
        df_with_tests[feature].dropna().mean()
        for feature in features
    ]
    median_differences = [
        df_without_tests[feature].dropna().median() -
        df_with_tests[feature].dropna().median()
        for feature in features
    ]

    relative_differences = [
        int((df_without_tests[feature].dropna().median() - df_with_tests[feature].dropna().median()) / df_with_tests[feature].dropna().median()*100)
        for feature in features
    ]
    cles_values = [
        "{:.2%}".format(cles(
            df_with_tests[feature].dropna(),
            df_without_tests[feature].dropna()
        ))
        for feature in features
    ]
    cohensd_values = [
        cohen_d(
            df_with_tests[feature].dropna(),
            df_without_tests[feature].dropna()
        )
        for feature in features
    ]

    tester = ks_2samp
    tester = mannwhitneyu
    # tester = ttest_ind
    pvalues = [
        tester(
            df_without_tests[feature].dropna().values,
            df_with_tests[feature].dropna().values,
            # alternative="two-sided"
            # equal_var=False,
        ).pvalue
        for feature in features
    ]
    # Multiple-test correction (Benjamini-Hochberg FDR, method='fdr_bh')
    _,pvalues,*_ = multipletests(pvalues, alpha=0.05, method='fdr_bh')

    # # Add info boxes to the boxplot
    # bbox_props_not_significant = dict(boxstyle="round,pad=0.3", fc=(1,1,1,0.8), ec='lightgray', lw=0.5)
    # bbox_props_significant = dict(boxstyle="round,pad=0.3", fc=(1,1,1,0.8), ec='black', lw=0.5)
    # for name, x, mean_difference, median_difference, pvalue in zip(names, xticks, mean_differences, median_differences, pvalues):
    #     if pvalue < 0.05:
    #         bbox_props = bbox_props_significant
    #     else:
    #         bbox_props = bbox_props_not_significant
    #     ax.annotate(
    #         (
    #             r"$\Delta\bar{{x}} = {:.2f}$".format(mean_difference)+"\n"+
    #             r"$\Delta Md = {:.2f}$".format(median_difference)+"\n"+
    #             r"$p = {:.4f}$".format(pvalue)
    #         ),
    #         (x,2.5),
    #         va='top', ha='center',
    #         fontsize=11,
    #         bbox=bbox_props
    #     )
    for patch,pvalue,color in zip(boxplot['boxes'], np.repeat(pvalues,2), colors):
        if pvalue < 0.05:
            # patch.set_facecolor((1.0,1.0,0.8,0.7))
            # patch.set_facecolor(color)
            # patch.set_hatch("\\")
            patch.set_linewidth(2)

    figure.tight_layout()
    figure.savefig(path_join(results_output, "sonar_vs_tests.pdf"))

    # Sonar issues significance results table
    table_values = list(zip(names, mean_differences, median_differences, relative_differences, cles_values, cohensd_values, pvalues))
    old_escape_rules = T.LATEX_ESCAPE_RULES
    T.LATEX_ESCAPE_RULES = {'%': '\\%'}
    table = tabulate(
        table_values,
        headers=['Severity', r"$\Delta\bar{{x}}$", r"$\Delta Md$", r"$\frac{\Delta{}Md}{Md_W}$(%)",'CL (%)','Cohen\'s $d$', '$p$-value'],
        # showindex=issues_column,
        tablefmt='latex',
        floatfmt=".4f",
    )
    T.LATEX_ESCAPE_RULES = old_escape_rules
    with open(path_join(results_output, "sonar_metrics_test.tex"), 'w') as f:
        f.write(table)


    from itertools import chain
    issues_column = list(chain.from_iterable([("\\multirow{{2}}{{*}}{{{}}}".format(name), ' ') for name in names]))
    old_escape_rules = T.LATEX_ESCAPE_RULES
    T.LATEX_ESCAPE_RULES = {'%': '\\%'}
    table = tabulate(
        [
            (
                sample_name,
                df_tmp[feature].dropna().count(),
                "${:.4f}$".format(df_tmp[feature].dropna().median()),
                "${:.4f}$".format(df_tmp[feature].dropna().mean()),
                "${:.4f}$".format(df_tmp[feature].dropna().std()),
                shapiro(df_tmp[feature].dropna())[1] < 0.0001 and "$p < 0.0001$",
            )
            for feature in features
            for (df_tmp, sample_name) in ((df_with_tests, '$W$'), (df_without_tests, '$WO$'))
        ],
        headers=['Tests', '$N$', '$Md$', '$\\bar{x}$', '$s$', '$X \\sim N$'],
        showindex=issues_column,
        tablefmt='latex',
    )
    T.LATEX_ESCAPE_RULES = old_escape_rules
    with open(path_join(results_output, "sonar_metrics.tex"), 'w') as f:
        f.write(table)
    # ------------------------------------------------- #


    ###############
    # Hall of Fame
    ###############
    hall_of_fame = df[df[['ci/cd', 'unit_tests', 'ui_tests']].all(axis=1)].sort_values('stars', ascending=False)
    categories = hall_of_fame['category'].unique()
    small_hall_of_fame = [hall_of_fame[hall_of_fame['category']==category].iloc[0][['user', 'project_name']] for category in categories ]
    small_hall_of_fame_table = tabulate(
        small_hall_of_fame,
        headers=['Category', 'Organization', 'Project Name'],
        showindex=list(categories),
        tablefmt='latex',
    )
    with open(path_join(results_output, "small_hall_of_fame.tex"), 'w') as f:
        f.write(small_hall_of_fame_table)
    #############

    #### Categories ######
    figure, ax = plt.subplots(figsize=(6.4, 4))
    (df[['app_id','category']]
     .groupby('category')
     .count()
     .plot.bar(color='black', width=0.25, ax=ax))
    ax.legend().remove()
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.spines['left'].set_visible(False)
    ax.yaxis.grid(linestyle='dotted', color='gray')
    ax.set_xlabel('Category')
    ax.set_ylabel('Number of Apps')

    figure.tight_layout()
    figure.savefig(path_join(results_output, "categories.pdf"))
    ######################

    # --- Percentage of Android tests over the age of the apps (cumulated) --- #
    def tests_in_projects_by_time_of_creation_cumm(df_projects, frameworks,
                                                   title=None, verbose=False, **kwargs):
        project_with_test_per_age = []
        total_projects_per_age = []
        n_projects_with_tests_history = []
        total_projects_history = []
        age_max = df_projects['age_numeric'].max()+1
        for age in range(age_max)[::-1]:
            n_projects_with_tests = df_projects[df_projects['age_numeric']==age][frameworks].apply(any, axis=1).sum()
            n_projects_with_tests_history.append(n_projects_with_tests)
            total_projects = len(df_projects[df_projects['age_numeric']==age].index)
            total_projects_history.append(total_projects)
            project_with_test_per_age.append(n_projects_with_tests)
            total_projects_per_age.append(total_projects)
            if verbose:
                portion = n_projects_with_tests / total_projects if total_projects else 0
                print("Age {}:".format(age))
                print("{} out of {} projects ({:.1%}).".format(n_projects_with_tests, total_projects, portion))
        project_with_test_per_age_cum = [sum(project_with_test_per_age[:index+1]) for index in range(len(project_with_test_per_age))]
        total_projects_per_age_cum = [sum(total_projects_per_age[:index+1]) for index in range(len(total_projects_per_age))]
        portions = []
        for with_tests, total in zip(project_with_test_per_age_cum, total_projects_per_age_cum):
            if total > 0:
                portions.append(with_tests/len(df_projects))
            else:
                portions.append(0)
        plt.plot(range(age_max)[::-1], portions, **kwargs)
        plt.scatter(
            range(age_max)[::-1], portions, total_projects_history,
            marker='o',
            zorder=kwargs.get('zorder'),
            color=kwargs.get('color')
        )
        ax = plt.gca()
        ax.spines['right'].set_visible(False)
        ax.spines['top'].set_visible(False)
        ax.spines['left'].set_visible(False)
        ax.set_xticks(range(age_max)[::-1])
        ax.set_yticklabels(["{:.0%}".format(label) for label in ax.get_yticks()])
        ax.set_ylabel("Percentage of projects")
        ax.yaxis.grid(linestyle='dotted', color='gray')
        ax.legend(loc='upper center', shadow=False)
        if title:
            ax.set_title(title)

    figure, ax = plt.subplots(1,1)
    tests_in_projects_by_time_of_creation_cumm(
        df,
        unit_test_frameworks+ui_automation_frameworks+cloud_test_services,
        label="Any", color=colors_dict['any'], zorder=2,
        linestyle=linestyle_dict['any'],
    )
    tests_in_projects_by_time_of_creation_cumm(
        df,
        ['no_tests'],
        label="No tests", color='darkred', zorder=5,
        linestyle="--",
    )
    ax.set_xlabel("Years since first commit")
    ax.axvspan(0,2, color='darkgreen', alpha=0.1)
    figure.tight_layout()
    figure.savefig(path_join(results_output, "tests_by_age_cumm_3.pdf"))
    ax.invert_xaxis()
    figure.savefig(path_join(results_output, "tests_by_age_cumm_3_i.pdf"))