Example #1
def Load():
    # parse the xmls and (re)build the models and clips index
    global ModelDemos, ModelDirs
    ModelDirs = {}
    ModelDemos = {}

    if cfg.vdhd_data is None:
        return

    for dir_name in os.listdir(cfg.vdhd_data):
        # inside the vghd data dir,
        # each dir is named as the card no,
        model_dir = pj(cfg.vdhd_data, dir_name)
        if os.path.isdir(model_dir):
            # for each such dir,
            for file_item in os.listdir(model_dir):
                # if there is an xml file,
                if file_item.endswith(".xml") and "cache" not in file_item:
                    # store the dir.
                    ModelDirs[dir_name] = model_dir
                    ModelDemos[dir_name] = []
                    demo_dir = pj(cfg.vghd_models, dir_name)
                    # if a same dir exists inside the vghd models dir,
                    if os.path.isdir(demo_dir):
                        # store the clips.
                        for demo_file in os.listdir(demo_dir):
                            # this would need to change if we want to
                            # include files other than .demo clips
                            if demo_file.endswith(".demo"):
                                ModelDemos[dir_name].append(demo_file)
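The excerpts in this collection rely on the pj alias, and this first one also on a cfg object, neither of which is defined in the lines shown. A minimal preamble, stated as an assumption rather than taken from the original module, would be:

import os
from os.path import join as pj  # alias used throughout these examples

# cfg is assumed to be a configuration object exposing the relevant
# directories as plain path strings (e.g. cfg.vdhd_data, cfg.vghd_models).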
Example #2
def clean(rep="."):
    """Thorough cleaning of all arborescence rooting at rep.

    Args:
        rep: (str) root directory to start the walk

    Returns:
        None
    """
    for name in ("build", "dist"):
        pth = pj(rep, name)
        if exists(pth):
            rmtree(pth)

    for root, dnames, fnames in walk(rep):
        # do not walk directories starting with "."
        for name in tuple(dnames):
            if "clean.no" in listdir(pj(root, name)):
                dnames.remove(name)
            elif name.startswith("."):
                dnames.remove(name)
            elif name == "__pycache__":
                rmtree(pj(root, name))
                dnames.remove(name)

        for name in fnames:
            if not name.startswith("."):
                if splitext(name)[1] in [".pyc", ".pyo"]:
                    remove(pj(root, name))
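The pruning above works because os.walk() with the default topdown=True honours in-place edits to dnames: removing an entry stops the walk from descending into that directory. A minimal stand-alone sketch of the same idiom:

from os import walk

for root, dnames, fnames in walk("."):
    # prune hidden directories in place so walk() never enters them
    dnames[:] = [d for d in dnames if not d.startswith(".")]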
Example #3
def docker_build(dist=None, force=False):
    ''' build Docker image from ./docker/<dist>/Dockerfile '''

    force = prep_bool_arg(force)

    def build(dist, force):
        image_existing = False
        if not force:
            with settings(warn_only=True):
                image_existing = local('docker images | grep -q package_build/{dist}'.format(dist=dist)).succeeded

        if not image_existing:
            print '(re)building image {}...'.format(dist)
            local('docker build --tag=package_build/{dist} {path}/docker/{dist}/'.format(dist=dist, path=PATH))

    if dist:
        print 'building Docker image for {}...'.format(dist)
        builddir = pj(PATH, 'docker', dist)
        os.path.isdir(builddir) or abort('{} directory does not exist'.format(builddir))
        build(dist, force)
    else:
        print 'building Docker images for all distributions...'
        for entry in os.listdir(pj(PATH, 'docker')):
            if os.path.isdir(pj(PATH, 'docker', entry)):
                build(entry, force)
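As a usage note, docker_build is a Fabric 1.x task; mirroring how Example #5 dispatches tasks through execute(), it could also be invoked programmatically. The distribution name below is purely illustrative:

from fabric.api import execute

# hypothetical call: build the image for a single distribution
execute(docker_build, dist='centos7')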
Example #4
def my_special_function(name):
    """ This function will create 42 C files. """
    path = pj(EXPERIMENT_FOLDER, name)
    if not os.path.exists(path):
        os.makedirs(path)

    c_template = TEMPLATE_ENV.get_template("dummy_template.c")
    # We make the id start at 1 and finish at 42
    for value in range(1, 43):
        with open(pj(path, "dummy_%d.c" % value), "w") as f:
            f.write(c_template.render(my_value=value))

    # If you change the platform target and want to push to iotlab
    # don't forget to update the node names
    makefile_template = TEMPLATE_ENV.get_template("dummy_makefile")
    with open(pj(path, "Makefile"), "w") as f:
        f.write(makefile_template.render(contiki=CONTIKI_FOLDER,
                                             target="iotlab-m3"))

    config_template = TEMPLATE_ENV.get_template("dummy_iotlab.json")
    res = [
        {"nodes": ["m3-%d.grenoble.iot-lab.info" % num],
         "firmware_path": pj(path, "dummy_%d.iotlab-m3" % num)
       } for num in range(1, 43)]
    with open(pj(path, "iotlab.json"), "w") as f:
        f.write(json.dumps(res, sort_keys=True,
                  indent=4, separators=(',', ': ')))
Example #5
def package_build(dist=None, recipe='', upload=False):
    ''' build packages from recipe for dist '''

    if dist:
        dists = [dist]
    else:
        dists = [d for d, _ in PACKAGE_FORMAT.items()]

    for dist in dists:
        start_time = time.time()
        execute('docker_run', dist, '/data/cook-recipe.sh {}'.format(recipe))

        for root, dirs, files in os.walk(pj(PATH, 'recipes')):
            for file in files:
                if file == 'lastbuild' and os.path.getmtime(pj(root, file)) >= start_time:
                    package_name = ''
                    with open(pj(root, file), 'r') as fh:
                        package_name = fh.readline().strip()
                        print('package_name: {}'.format(package_name))
                    if package_name:
                        package_format = package_name.split('.')[-1]
                        dist = root.split('/')[-1]
                        if upload:
                            execute('repo_{0}_add'.format(package_format), pj(root, package_name), dist)
        print 'task ran {0} seconds'.format(time.time() - start_time)
Example #6
 def setUp(self):
     self.bin = pj(program_path, "DBNDownBeatTracker")
     self.activations = Activations(
         pj(ACTIVATIONS_PATH, "sample.downbeats_blstm.npz"))
     self.result = np.loadtxt(
         pj(DETECTIONS_PATH, "sample.dbn_downbeat_tracker.txt"))
     self.downbeat_result = self.result[self.result[:, 1] == 1][:, 0]
Example #7
 def setUp(self):
     self.bin = pj(program_path, "GMMPatternTracker")
     self.activations = Activations(
         pj(ACTIVATIONS_PATH, "sample.gmm_pattern_tracker.npz"))
     self.result = np.loadtxt(
         pj(DETECTIONS_PATH, "sample.gmm_pattern_tracker.txt"))
     self.downbeat_result = self.result[self.result[:, 1] == 1][:, 0]
Example #8
    def default(self):

        if CONDA_ENV:
            qt_dir = CONDA_LIBRARY_PREFIX
            self._default["QTDIR"] = qt_dir
            self._default["QT5_BINPATH"] = pj(qt_dir, 'bin')
            self._default["QT5_CPPPATH"] = pj(qt_dir, 'include')
            self._default["QT5_LIBPATH"] = pj(qt_dir, 'lib')
            self._default["QT5_FRAMEWORK"] = False
            return

        qt_dir = os.getenv("QTDIR")
        qt_lib = '$QTDIR/lib'
        qt_bin = '$QTDIR/bin'
        qt_inc = '$QTDIR/include'
        qt_fmk = False

        if isinstance(platform, Linux):
            # Use LSB spec
            qt_dir = '/usr'
            qt_bin = '/usr/bin'
            qt_inc = '/usr/include/qt5'
            qt_lib = '/usr/lib'
        elif isinstance(platform, Darwin) and exists('/opt/local/libexec/qt5'):
            qt_dir = '/opt/local/libexec/qt5'
            qt_bin = '/opt/local/libexec/qt5/bin'
            qt_inc = '/opt/local/libexec/qt5/include'
            qt_lib = '/opt/local/libexec/qt5/lib'
        elif not qt_dir:
            try:
                if isinstance(platform, Win32) or isinstance(platform, Darwin):
                    # Try to use openalea egg
                    from openalea.deploy import get_base_dir
                    qt_dir = get_base_dir("qt5-dev")
            except:
                if isinstance(platform, Win32):
                    try:
                        from openalea.deploy import get_base_dir
                        qt_dir = get_base_dir("qt5")
                    except:
                        # Try to locate bin/moc in PATH
                        qt_dir = find_executable_path_from_env("moc.exe", strip_bin=True)

                elif isinstance(platform, Posix):
                    qt_dir = pj('/usr', 'lib', 'qt5')
                    if not exists(pj(qt_dir, 'bin')):
                        # Use LSB spec
                        qt_dir = '/usr'
                        qt_bin = '/usr/bin'
                        qt_inc = '/usr/include/qt5'
                        qt_lib = '/usr/lib'

        if isinstance(platform, Darwin):
            qt_fmk = True

        self._default["QTDIR"] = qt_dir
        self._default["QT5_BINPATH"] = qt_bin
        self._default["QT5_CPPPATH"] = qt_inc
        self._default["QT5_LIBPATH"] = qt_lib
        self._default["QT5_FRAMEWORK"] = qt_fmk
Example #9
 def setUp(self):
     self.bin = pj(program_path, "DBNBeatTracker")
     self.activations = Activations(
         pj(ACTIVATIONS_PATH, "sample.beats_blstm.npz"))
     self.result = np.loadtxt(
         pj(DETECTIONS_PATH, "sample.dbn_beat_tracker.txt"))
     self.online_results = [0.47, 0.79, 1.48, 2.16, 2.5]
Example #10
    def default(self):
        self._default['libs_suffix'] = '$compiler_libs_suffix'
        self._default['flags'] = ''
        self._default['defines'] = ''

        isPosix = isinstance(platform, Posix)        



        # -- lets now look for decent flags --
        self._default['flags'] = self.get_default_flags()
        self._default['defines'] = self.get_default_defines()

        # -- lets now look for decent include dirs --
        try:
            # Try to use openalea egg
            from openalea.deploy import get_base_dir
            try:
                base_dir = get_base_dir("boost")
            except:                
                base_dir = get_base_dir("boostpython")
            self._default['include'] = pj(base_dir, 'include')
            self._default['lib'] = pj(base_dir, 'lib')
            self.__usingEgg = True
        except:
            try:
                import openalea.config as conf
                self._default['include'] = conf.include_dir
                self._default['lib'] = conf.lib_dir

            except ImportError, e:                
                self._default['include'] = '/usr/include' if isPosix else pj(os.getcwd(), "include")
                self._default['lib']     = '/usr/lib' if isPosix else pj(os.getcwd(), "lib")
Example #11
def install():
    
    from openalea.deploy.shortcut import create_win_shortcut, set_win_reg, create_fd_shortcut
    from openalea.deploy import get_base_dir
    import sys
    from os.path import join as pj

    # Get the location of the installed egg
    base_dir = get_base_dir('openalea.visualea')
    share_dir = pj(base_dir, 'share')
    
    winexe = sys.executable
    winexe = winexe.replace('python.exe', 'pythonw.exe')
    
    prefix = base_dir.lower().split("lib")[0]
        
    create_win_shortcut(name = 'Visualea',
                        target = winexe,
                        arguments = '"'+pj(prefix, 'Scripts', 'visualea-script.pyw')+'"',
                        startin = "", 
                        icon = pj(share_dir, 'openalea_icon.ico'),
                        description = "Visual programming",
                        menugroup = "OpenAlea")

    create_win_shortcut(name = 'Python Shell',
                        target = winexe,
                        arguments = '"'+pj(prefix, 'Scripts', 'aleashell-script.pyw')+'"',
                        startin = "", 
                        icon = "",
                        description = "Python Shell",
                        menugroup = "OpenAlea")
Example #12
def main(top):
    top = os.path.abspath(top)
    if not exists(pj(top, "abipy")) or not exists(pj(top, "setup.py")): 
        raise ValueError("top %s is not the top-level abipy directory" % top)

    os.chdir(top)

    sys.path.insert(0, top)

    import cProfile
    import pstats

    prof_file = pj(top, ".abipy_prof")

    # Profile the import of the package
    cProfile.run('abipy_import()', prof_file)

    p = pstats.Stats(prof_file)
    # Show the functions that consumed the most time:
    p.sort_stats('time').print_stats(10)

    # To sort by file name and print statistics only for class __init__ methods:
    # p.sort_stats('file').print_stats('__init__')

    return 0
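abipy_import() is not shown in this excerpt; since the stated goal is to profile the package import, a plausible stand-in (an assumption, not the original helper) is simply:

def abipy_import():
    """Import abipy so that cProfile can time the package import."""
    import abipy  # noqa: F401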
Example #13
   def default(self):

      if isinstance(platform, Win32):
      
         self._default['flags'] = ''
         self._default['defines'] = ''

         try:
            cgalroot = os.environ['CGALROOT']
            self._default['include'] = pj(cgalroot,'auxiliary','gmp','include')
            self._default['libpath'] = pj(cgalroot,'auxiliary','gmp','lib')
            self._default['libs'] = 'libgmp-10'            
         except:
            try:
               import openalea.config as conf
               self._default['include'] = conf.include_dir
               self._default['libpath'] = conf.lib_dir
            except ImportError, e:
               try:
                  import pkg_resources as pkg
                  egg_env = pkg.Environment()
                  mingw_base = egg_env["mingw"][0].location
                  self._default['include'] = pj(mingw_base, "include")
                  self._default['libpath'] = pj(mingw_base, "lib")
               except Exception, e:
                  self._default['include'] = 'C:' + os.sep
                  self._default['libpath'] = 'C:' + os.sep
               
            self._default['libs'] = 'gmp'
Example #14
 def set_data_files(self, share_path):
     for root, dirs, file_names in os.walk('data/'):
         files = []
         for file_name in file_names:
             if file_name.endswith('.pyc'):
                 continue
             elif file_name.endswith('.dat') and \
                     file_name.startswith('Geo'):
                 continue
             elif file_name == "ooniprobe.conf.sample":
                 files.append(self.gen_config(share_path))
                 continue
             files.append(pj(root, file_name))
         self.distribution.data_files.append(
             [
                 pj(share_path, 'ooni', root.replace('data/', '')),
                 files
             ]
         )
     settings = SafeConfigParser()
     settings.add_section("directories")
     settings.set("directories", "data_dir",
                  os.path.join(share_path, "ooni"))
     with open("ooni/settings.ini", "w+") as fp:
         settings.write(fp)
Example #15
def find_login_passwd(allow_user_input=True):
    home = ""
    # Get password
    if 'USERPROFILE' in os.environ:
        home = os.environ['USERPROFILE']

    elif 'HOME' in os.environ:
        home = os.environ['HOME']

    rc = pj(home, '.pypirc')
    if not exists(rc):
        matched = glob.glob(pj(home, "*pydistutils.cfg"))
        if len(matched):
            rc = matched[0]

    username, password = None, None
    if exists(rc):
        print('Using PyPI login from %s' % (rc))
        config = configparser.ConfigParser({
            'username': '',
            'password': '',
            'repository': ''})
        config.read(rc)

        username = config.get('server-login', 'username')
        password = config.get('server-login', 'password')
    elif allow_user_input:
        username = input("Enter your GForge login:")
        password = input("Enter your GForge password:")
    return username, password
Example #16
def regenerate_package(env, target=".", overwrite=False):
    """Rebuild all automatically generated files.

    Args:
        env (jinja2.Environment): current working environment
        target (str): target directory to write into
        overwrite (bool): default False, whether or not
                          to overwrite user-modified files

    Returns:
        None on success, False if some option parameters are invalid
    """
    # check consistency of env params
    invalids = []
    for option in installed_options(env):
        for n in check_option_parameters(option, env):
            invalids.append("%s.%s" % (option, n))

    if len(invalids) > 0:
        for param in invalids:
            logger.warning("param %s is not valid", param)

        return False

    # check for potential conflicts
    hm_ref = get_pkg_hash(target)

    conflicted = []
    for file_pth in hm_ref:
        pth = pj(target, file_pth)
        if exists(pth) and modified_file_hash(pth, hm_ref):
            conflicted.append(pth)
        else:
            # file disappeared, regenerate_dir will reload it if managed by pkglts
            pass

    overwrite_file = {}
    if len(conflicted) > 0:
        if overwrite:
            for name in conflicted:
                logger.debug("conflicted, '%s'" % name)
                overwrite_file[pth_as_key(name)] = True
        else:
            for name in conflicted:
                print("A non editable section of %s has been modified" % name)
                overwrite_file[pth_as_key(name)] = get_user_permission("overwrite", False)

    # render files for all options
    hm = {}
    for name in installed_options(env):
        opt_ref_dir = pj(get_data_dir(), 'base', name)
        if not exists(opt_ref_dir):
            logger.debug("option %s do not provide files" % name)
        else:
            logger.info("rendering option %s" % name)
            loc_hm = regenerate_dir(opt_ref_dir, target, env, overwrite_file)
            hm.update(loc_hm)

    hm_ref.update(hm)
    write_pkg_hash(hm_ref, target)
Example #17
def clean(rep="."):
    """Thorough cleaning of all arborescence rooting at rep.

    Todo: exception list instead of hardcoded one

    Args:
        rep (str): default ".", top directory to clean

    Returns:
        None
    """
    for name in ("build", "dist", "doc/_dvlpt", "doc/build"):
        pth = normpath(pj(rep, name))
        if exists(pth):
            rmtree(pth)

    for root, dnames, fnames in walk(rep):
        # do not walk directories starting with "."
        for name in tuple(dnames):
            if "clean.no" in listdir(pj(root, name)):
                dnames.remove(name)
            elif name.startswith("."):
                dnames.remove(name)
            elif name == "__pycache__":
                rmtree(pj(root, name))
                dnames.remove(name)

        for name in fnames:
            if not name.startswith("."):
                if splitext(name)[1] in [".pyc", ".pyo"]:
                    remove(pj(root, name))
Example #18
def protocol_repartition_depth(folder):
    output_folder = pj(folder, "results", "protocol_repartition")
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)

    depth_df = pd.read_csv(pj(folder, "results", "depth.csv"))
    depth_df.set_index("node", inplace=True)

    pcap_df = pd.read_csv(pj(folder, "results", "pcap_relooked.csv"))
    pcap_df = pcap_df.join(depth_df, on="mac_src")

    res = pd.DataFrame()

    res["rpl"] = pcap_df[pcap_df.icmpv6_type == "rpl"].groupby("depth").sum().length
    res["udp"] = pcap_df[pcap_df.icmpv6_type == "udp"].groupby("depth").sum().length

    RATE = 250000
    res["rpl"] = 8.0 * res["rpl"] / RATE
    res["udp"] = 8.0 * res["udp"] / RATE

    ax = res.plot(kind="bar", stacked=True)
    ax.set_ylabel('Time [s]')
    ax.set_xlabel("Depth")
    ax.set_xticklabels(res.index.map(int), rotation=0)

    fig = plt.gcf()
    plt.tight_layout()

    # fig.set_size_inches(18.5, 10.5)
    fig.savefig(pj(output_folder, 'protocol_repartition_depth.png'))
    plt.close('all')
Example #19
    def remove(session, item):
        """Remove a given item from the database.

        Args:
            session: (DBSession)
            item: (ContentItem)

        Returns:
            (True)
        """
        pth = GalleryItem.gallery_pth(item.project)

        # remove associated thumbnail
        thumb_pth = pj(pth, "%s_thumb.png" % item.id)
        if exists(thumb_pth):
            remove(thumb_pth)

        # remove associated resource if in gallery
        if item.url.startswith("seeweb:"):
            img_name = item.url.split("/")[-1]
            img_pth = pj(pth, img_name)
            if exists(img_pth):
                remove(img_pth)

        # delete item
        session.delete(item)

        return True
Example #20
def protocol_repartition_aggregated(folder, BIN=25):
    output_folder = pj(folder, "results", "protocol_repartition")
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)

    # depth_df = pd.read_csv(pj(folder, "results", "depth.csv"))
    # depth_df.set_index("node", inplace=True)

    pcap_df = pd.read_csv(pj(folder, "results", "pcap_relooked.csv"))
    pcap_df["bin_start"] = BIN * (pcap_df.time // BIN)

    res = pd.DataFrame()

    RATE = 250000
    res["udp"] = pcap_df[(pcap_df.icmpv6_type == "udp") & (pcap_df.time < 200)].groupby("bin_start").sum().length
    res["rpl"] = pcap_df[(pcap_df.icmpv6_type == "rpl") & (pcap_df.time < 200)].groupby("bin_start").sum().length

    res["rpl"] = 8.0 * res["rpl"] / RATE
    res["udp"] = 8.0 * res["udp"] / RATE

    ax = res[["rpl", "udp"]].plot(kind="bar", stacked=True)
    ax.set_ylabel('Time [s]')
    ax.set_xlabel("Time [s]")
    ax.set_ylim(0, 4.5)
    ax.set_xticklabels(res.index.map(int), rotation=0)

    fig = plt.gcf()
    plt.tight_layout()

    # fig.set_size_inches(18.5, 10.5)
    fig.savefig(pj(output_folder, 'protocol_repartition_aggregated.png'))
    plt.close('all')
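The bin_start column above uses a common floor-division binning idiom; in isolation, with purely illustrative numbers, it behaves like this:

BIN = 25
time = 63.2
bin_start = BIN * (time // BIN)  # 25 * 2.0 == 50.0, the start of the 50-75 s bin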
Example #21
def overhead(folder):
    """
    Plot the overhead (RPL, ACK,...).

    This graph measures the number of bytes sent by nodes that are not
    application oriented (UDP, ping or CoAP), so we can see how many bytes
    are transmitted just to keep the network alive. These packets are
    usually RPL and ACK packets.

    """
    fig = plt.figure()
    ax1 = fig.add_subplot(111)

    ax1.set_title('RPL traffic by time')
    ax1.set_xlabel('Time (s)')
    ax1.set_ylabel('RPL traffic (bytes)')

    with open(pj(folder, "results", "io.csv")) as io_csv_f:
        reader = DictReader(io_csv_f)
        time, overhead_bytes = [], []
        for row in reader:
            time.append(float(row["bin_end"]))
            overhead_bytes.append(float(row["total_bytes"])
                                  - float(row["udp_bytes"])
                                  - float(row["ping_bytes"])
                                  - float(row["coap_bytes"]))
        ax1.plot(time, overhead_bytes)

    # assumed output location: the excerpt does not define an image directory
    img_path = pj(folder, "results", "overhead.png")
    fig.savefig(img_path)
    plt.close('all')
Example #22
 def generate(self, key, stage, piece):
     """Handles responsibility for replacing 'gen' with the correct 
     value"""
     value = ''
     if key == 'images':
         value = self.generate_images(stage, piece)
     if key == 'source':
         raw_image = self.raw[0]
         (dirname, base ) = os.path.split(raw_image)
         pre = self.find_prefix(stage, piece)
         value = pj(dirname, '%s%s%s' % ('mean', pre[1:], base))
     if key == 'session':
         for n_run in range(1,self.n_runs+1):
             value += self.generate_session(n_run, piece)
     if key == 'directory':
         value = self.analysis_dir(piece['name'])
     if key == 'spm_mat_file':
         value = pj(self.analysis_dir(piece['name']), 'SPM.mat')
     if key == 'contrast':
         contrasts = self.paradigm['contrasts']
         # for multiple model paradigms, contrasts will be a dictionary
         if piece['name'] in contrasts:
             contrasts = contrasts[piece['name']]
         for n_con, contrast in enumerate(contrasts):
             value += self.generate_contrast( n_con + 1, contrast, piece)
     return value 
Example #23
def msgbakloop(_dir, q, sid):
    _now = datetime.now()
    _sdir = str(_now.month)
    _np = pj(pj(_dir, q), _sdir)
    for i in range(2, 8):
        savedaymsg(sid, q, i, _np)
    print ' ----------------\nDone!'
Example #24
def run_model(i, task, params, include, group_only):

    print '---', i, task, params, include, group_only, '---'

    # create the file structure
    root = ('congruence_and_modulation_affects_' +
            '_'.join(p for p in params))
    path = pj(
        root, task, '_'.join(p for p in include),
        '_'.join(p for p in group_only))
    samples_filename = pj(path, 'samples_%i.pickle' % i)
    model_filename = pj(path, 'model_%i.pickle' % i)

    if not os.path.exists(path):
        os.makedirs(path)

    # fit and save the model
    s = "%s~C(congruence, Treatment('control'))*C(modulation, Treatment('slow'))"
    design = [s % p for p in params]

    if not os.path.exists(model_filename):
        data = pandas.read_pickle('data.pickle')
        print data.head()
        data = data[(data.task == task)]
        model = hddm.HDDMRegressor(data,
                                   design,
                                   include=include,
                                   group_only_nodes=group_only)
        model.find_starting_values()
        model.sample(10000, burn=500, 
                     dbname=samples_filename, db='pickle')
        model.save(model_filename)

    return
Example #25
def make(tool, mainFD):
  makeFD=codecs.open(pj(args.reportOutDir, tool, "make.txt"), "w", encoding="utf-8")
  try:
    if not args.disableMake:
      # make
      errStr=""
      if not args.disableMakeClean:
        print("\n\nRUNNING make clean\n", file=makeFD); makeFD.flush()
        if subprocess.call(["make", "clean"], stderr=subprocess.STDOUT, stdout=makeFD)!=0: errStr=errStr+"make clean failed; "
      print("\n\nRUNNING make -k\n", file=makeFD); makeFD.flush()
      if subprocess.call(["make", "-k", "-j", str(args.j)], stderr=subprocess.STDOUT, stdout=makeFD)!=0: errStr=errStr+"make failed; "
      if not args.disableMakeInstall:
        print("\n\nRUNNING make install\n", file=makeFD); makeFD.flush()
        if subprocess.call(["make", "-k", "install"], stderr=subprocess.STDOUT, stdout=makeFD)!=0: errStr=errStr+"make install failed; "
      if errStr!="": raise RuntimeError(errStr)
    else:
      print("make disabled", file=makeFD); makeFD.flush()

    result="done"
  except RuntimeError as ex:
    result=str(ex)
  if not args.disableMake:
    print('<td class="%s"><span class="glyphicon glyphicon-%s"></span>&nbsp;'%("success" if result=="done" else "danger",
      "ok-sign alert-success" if result=="done" else "exclamation-sign alert-danger"), file=mainFD)
    print('  <a href="'+myurllib.pathname2url(pj(tool, "make.txt"))+'">'+result+'</a>', file=mainFD)
    print('</td>', file=mainFD)
  makeFD.close()
  mainFD.flush()

  if result!="done":
    return 1
  return 0
Example #26
def tree(dname, padding, txt):
    pkg_cfg = dict(base={"namespace": None, "owner": "owner", "pkgname": "pkgname", "url": None})

    env = create_env(pkg_cfg)

    files = [(isdir(pj(dname, fname)), fname) for fname in listdir(dname)]
    files.sort()

    count = 0
    for is_dir, fname in files:
        count += 1
        txt += padding + "|\n"
        fmt_name = nn(env, fname)
        txt += padding + "+-" + fmt_name
        path = pj(dname, fname)
        if is_dir:
            txt += "/\n"
            if count == len(files):
                txt = tree(path, padding + " " + " " * (len(fmt_name) / 2), txt)
            else:
                txt = tree(path, padding + "|" + " " * (len(fmt_name) / 2), txt)
        else:
            txt += "\n"

    return txt
Example #27
def message(folder):
    """
    Message queue preparation

    - Extract from powertracker all the message
    - Extract from the serial logs all the message.

    IMPORTANT: We only extract the message received from the root or send by
    the root.

    186572 ID:2 DATA send to 1 'Hello 1'
    187124 ID:8 DATA recv 'Hello 1' from 2
    197379 ID:8 REPLY send to 7 'Reply 1'
    197702 ID:7 REPLY recv 'Reply 1' from 8
    """
    stats = defaultdict(float)

    sorted_messages = sorted(
        chain(serial2message(folder, stats),
              # powertracker2message(folder, stats)
              ),
        key=operator.attrgetter("time"))

    print_stats(stats)

    # MESSAGE logging for debugging purposes #

    log.info(pj(folder, "results", "messages.csv"))
    with open(pj(folder, "results", "messages.csv"), "w") as f:
        writer = DictWriter(f, fieldnames=fieldnames)
        writer.writeheader()
        for m in sorted_messages:
            writer.writerow(m._asdict())
    log.info("messages saved")
    return sorted_messages
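The records returned by serial2message() are sorted with attrgetter("time") and written via ._asdict(), which fits a namedtuple; a hedged sketch of such a record, where every field name except "time" is an assumption, is:

from collections import namedtuple

Message = namedtuple("Message", ["time", "node", "message_type", "message"])
fieldnames = list(Message._fields)  # would feed the DictWriter used above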
Example #28
    def run(self):
        dst_path = "dist/ooni-resources.tar.gz"

        try:
            self.find_executables()
        except ExecutableNotFound as enf:
            print("ERR: Could not find '{0}'".format(enf.message))
            return

        tmp_dir = tempfile.mkdtemp()
        pkg_dir = tempfile.mkdtemp()

        os.mkdir(pj(pkg_dir, "resources"))
        os.mkdir(pj(pkg_dir, "GeoIP"))

        try:
            geoip_asn_path = self.download(GEOIP_ASN_URL, tmp_dir, "GeoIPASNum.dat.gz")
        except Exception as exc:
            print(exc.message)
            return
        try:
            geoip_path = self.download(GEOIP_URL, tmp_dir, "GeoIP.dat.gz")
        except Exception as exc:
            print(exc.message)
            return
        try:
            test_lists_path = self.download(TEST_LISTS_URL, tmp_dir, "master.zip")
        except Exception as exc:
            print(exc.message)
            return

        run_command([self.gunzip, geoip_asn_path])
        run_command([self.gunzip, geoip_path])
        run_command([self.unzip, "-d", tmp_dir, test_lists_path])

        shutil.move(pj(tmp_dir, "GeoIP.dat"),
                    pj(pkg_dir, "GeoIP", "GeoIP.dat"))
        shutil.move(pj(tmp_dir, "GeoIPASNum.dat"),
                    pj(pkg_dir, "GeoIP", "GeoIPASNum.dat"))
        shutil.move(pj(tmp_dir, "test-lists-master", "lists"),
                    pj(pkg_dir, "resources", "citizenlab-test-lists"))
        # Don't include services and official lists
        shutil.rmtree(
            pj(pkg_dir,
               "resources",
               "citizenlab-test-lists",
               "services"),
            ignore_errors=True)
        shutil.rmtree(
            pj(pkg_dir,
               "resources",
               "citizenlab-test-lists",
               "official"),
            ignore_errors=True)
        run_command([self.tar, "cvzf", dst_path, "-C", pkg_dir, "."])

        # Cleanup
        shutil.rmtree(pkg_dir, ignore_errors=True)
        shutil.rmtree(tmp_dir, ignore_errors=True)
        print("Written ooniresources to {0}".format(dst_path))
Example #29
    def _build_kernel_specs(self):
        """Returns the dict of name -> kernel_spec for all environments"""

        # This is called much too often and the conda calls are really expensive :-(
        if hasattr(self, "_build_kernel_specs_cache"):
            return getattr(self, "_build_kernel_specs_cache")

        venv_dirs = self.find_envs()
        kspecs = {}
        if platform.system() == "Windows":
            python_exe_name = "python.exe"
        else:
            python_exe_name = "python"

        for venv_name, venv_dir in venv_dirs.items():
            # conda on windows has python.exe directly in the env
            exe_name = pj(venv_dir, python_exe_name)
            if not os.path.exists(exe_name):
                exe_name = pj(venv_dir, "bin", python_exe_name)
            kspec_dict =  {"argv": [exe_name,
                                    "-m",
                                    "IPython.kernel",
                                    "-f",
                                    "{connection_file}"],
                           "language": "python",
                           "display_name": self.display_name_template.format(venv_name),
                           "env": {}}
            # This should probably use self.kernel_spec_class instead of the direct class
            kspecs.update({venv_name: KernelSpec(resource_dir=RESOURCES, **kspec_dict)})
        self._build_kernel_specs_cache = kspecs
        return kspecs
Example #30
def check(tool, mainFD):
  checkFD=codecs.open(pj(args.reportOutDir, tool, "check.txt"), "w", encoding="utf-8")
  if not args.disableMakeCheck:
    # make check
    print("RUNNING make check\n", file=checkFD); checkFD.flush()
    if simplesandbox.call(["make", "-j", str(args.j), "check"], envvar=["PKG_CONFIG_PATH", "LD_LIBRARY_PATH"], shareddir=["."],
                          stderr=subprocess.STDOUT, stdout=checkFD)==0:
      result="done"
    else:
      result="failed"
  else:
    print("make check disabled", file=checkFD); checkFD.flush()
    result="done"

  foundTestSuiteLog=False
  testSuiteLogFD=codecs.open(pj(args.reportOutDir, tool, "test-suite.log.txt"), "w", encoding="utf-8")
  for rootDir,_,files in os.walk('.'): # append all test-suite.log files
    if "test-suite.log" in files:
      testSuiteLogFD.write('\n\n')
      testSuiteLogFD.write(open(pj(rootDir, "test-suite.log")).read())
      foundTestSuiteLog=True
  testSuiteLogFD.close()
  if not args.disableMakeCheck:
    print('<td class="%s"><span class="glyphicon glyphicon-%s"></span>&nbsp;'%("success" if result=="done" else "danger",
      "ok-sign alert-success" if result=="done" else "exclamation-sign alert-danger"), file=mainFD)
    print('  <a href="'+myurllib.pathname2url(pj(tool, "check.txt"))+'">'+result+'</a>', file=mainFD)
    if foundTestSuiteLog:
      print('  <a href="'+myurllib.pathname2url(pj(tool, "test-suite.log.txt"))+'">test-suite.log</a>', file=mainFD)
    print('</td>', file=mainFD)
  checkFD.close()
  mainFD.flush()

  if result!="done":
    return 1
  return 0
Example #31
# encoding: utf-8
# pylint: skip-file
"""
This file contains tests for the madmom.features.tempo module.

"""

from __future__ import absolute_import, division, print_function

import unittest
from os.path import join as pj

from . import ACTIVATIONS_PATH
from madmom.features.tempo import *

act_file = np.load(pj(ACTIVATIONS_PATH, "sample.beats_blstm.npz"))
act = act_file['activations'].astype(np.float)
fps = float(act_file['fps'])

COMB_TEMPI = np.array([[176.470, 0.475], [117.647, 0.177], [240.0, 0.154],
                       [68.966, 0.099], [82.192, 0.096]])

HIST = interval_histogram_comb(act, 0.79, min_tau=24, max_tau=150)


class TestIntervalHistogramAcfFunction(unittest.TestCase):
    def test_values(self):
        hist = interval_histogram_acf(act, min_tau=24, max_tau=150)
        self.assertTrue(
            np.allclose(hist[0][:6], [
                0.10034907, 0.10061631, 0.11078519, 0.13461014, 0.17694432,
Example #32
 def setUp(self):
     self.bin = pj(program_path, "BarTracker")
     self.act = np.load(pj(ACTIVATIONS_PATH, 'sample.bar_tracker.npz'))
     self.beats = [[0.091, 1], [0.8, 2], [1.481, 3], [2.148, 1]]
     self.downbeats = [0.091, 2.148]
Example #33
try:
    from cStringIO import StringIO
except ImportError:
    from io import StringIO

import numpy as np

from madmom.features import Activations
from madmom.features.chords import load_chords
from madmom.evaluation.key import load_key

from . import AUDIO_PATH, ACTIVATIONS_PATH, ANNOTATIONS_PATH, DETECTIONS_PATH

tmp_act = tempfile.NamedTemporaryFile(delete=False).name
tmp_result = tempfile.NamedTemporaryFile(delete=False).name
sample_file = pj(AUDIO_PATH, 'sample.wav')
sample2_file = pj(AUDIO_PATH, 'sample2.wav')
sample_file_22050 = pj(AUDIO_PATH, 'sample_22050.wav')
sample_beats = pj(ANNOTATIONS_PATH, 'sample.beats')
stereo_sample_file = pj(AUDIO_PATH, 'stereo_sample.wav')
program_path = os.path.dirname(os.path.realpath(__file__)) + '/../bin/'

# prevent writing compiled Python files to disk
sys.dont_write_bytecode = True


def run_program(program):
    # import module, capture stdout
    test = imp.load_source('test', program[0])
    sys.argv = program
    backup = sys.stdout
Example #34
inputs_v = moving_average(np.random.randn(seq_size),
                          10).reshape(seq_size, batch_size, 1)

sess = tf.Session()
saver = tf.train.Saver()

model_fname = env.run("glm_model.ckpt")
if os.path.exists(model_fname):
    print "Restoring from {}".format(model_fname)
    saver.restore(sess, model_fname)
    epochs = 0
else:
    sess.run(tf.global_variables_initializer())

reward_v, reward_mean_v = None, None
tmp_dir = pj(os.environ["HOME"], "tmp")
[os.remove(pj(tmp_dir, f)) for f in os.listdir(tmp_dir) if f[-4:] == ".png"]

epochs = 1000
for e in xrange(epochs):
    state_v = GLMStateTuple(
        np.zeros((batch_size, net_size)),
        np.zeros((batch_size, filters_num + net_size)),
        np.zeros((batch_size, net_size)),
        np.zeros((batch_size, net_size)),
        np.zeros((batch_size, L, net_size)),
        np.zeros((batch_size, filters_num + net_size, net_size)),
        np.zeros((batch_size, L, filters_num)),
        np.zeros((batch_size, L, 1)),
    )
Example #35
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.linear_model import LassoCV
from sklearn.linear_model import RidgeCV
from sklearn.cross_decomposition import PLSRegression
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.model_selection import cross_val_score, cross_val_predict

# %%
data_dir = Path('path/to/Data').resolve()
results_dir = Path('path/to/results').resolve()

# %% ## Loading and cleaning data

data = pd.read_excel(
    pj(data_dir, 'macro_dataset'),
    header=0,
    sep=',',  #### load data as pandas dataframe, set sample IDs as index
    index_col='Subject_ID')

data.dropna(axis='columns', how='any', inplace=True)

data = data[data['report_bias'] ==
            0]  ### dropping data from biased questionnaires

# %% ### Select features and target columns of the dataset (continuous and categorical)

feats = [column for column in data.columns if 'delta' in column]

feats_cat = [
    column for column in data.columns
Example #36
def main(home_dir):
    from os.path import join as pj

    # Check if we are in the top of the ABINIT source tree
    my_name = os.path.basename(__file__) + ".main"
    if (not os.path.exists(pj(home_dir, "configure.ac"))
            or not os.path.exists(pj(home_dir, "src/98_main/abinit.F90"))):
        print("%s: You must be in the top of an ABINIT source tree." % my_name)
        print("%s: Aborting now." % my_name)
        sys.exit(1)

    # Init
    cnf_bin = MyConfigParser()
    cnf_fname = pj(home_dir, "config/specs/binaries.conf")
    cnf_bin.read(cnf_fname)
    bin_list = cnf_bin.sections()
    bin_list.sort()
    dep_order = dict()
    lib_order = dict()
    re_num = re.compile("[0-9][0-9]_")

    # Check order of dependencies and libraries
    for prg in bin_list:
        if cnf_bin.has_option(prg, "dependencies"):
            bin_deps = cnf_bin.get(prg, "dependencies").split()
        else:
            bin_deps = list()
        dep_old = 100
        dep_new = 100
        for dep in bin_deps:
            if dep in dep_levels:
                dep_new = dep_levels[dep]
            else:
                sys.stderr.write("%s: Error: unregistered dependency '%s'\n" %
                                 (my_name, dep))
                sys.exit(10)
            if dep_new > dep_old:
                if not prg in dep_order:
                    dep_order[prg] = list()
                dep_order[prg].append(dep)
            dep_old = dep_new

        if cnf_bin.has_option(prg, "libraries"):
            bin_libs = cnf_bin.get(prg, "libraries").split()
        else:
            bin_libs = list()
        lib_old = 100
        lib_new = 100
        for lib in bin_libs:
            if re_num.match(lib):
                lib_new = int(re.sub("_.*", "", lib))
                if lib_new > lib_old:
                    if not prg in lib_order:
                        lib_order[prg] = list()
                    lib_order[prg].append(lib)
            lib_old = lib_new

    # Report any disorder
    nerr = len(dep_order) + len(lib_order)

    if nerr > 0:
        sys.stderr.write("%s: reporting disordered libraries\n\n" %
                         (os.path.basename(sys.argv[0])))
        sys.stderr.write("X: D=Dependency / L=Library\n\n")
        sys.stderr.write("%s  %-24s  %-24s\n" %
                         ("X", "Program", "Dependency/Library"))
        sys.stderr.write("%s  %s  %s\n" % ("-", "-" * 24, "-" * 24))

        dep_keys = list(dep_order.keys())
        dep_keys.sort()
        for prg in dep_keys:
            for dep in dep_order[prg]:
                sys.stderr.write("%s  %-24s  %-24s\n" % ("D", prg, dep))
        lib_keys = list(lib_order.keys())
        lib_keys.sort()
        for prg in lib_keys:
            for lib in lib_order[prg]:
                sys.stderr.write("%s  %-24s  %-24s\n" % ("L", prg, lib))
        sys.stderr.write("\n")

    return nerr
Example #37
import os
import json
from os.path import join as pj

from fabric.api import task
from jinja2 import Environment, FileSystemLoader

# Default values
ROOT_DIR = os.path.dirname(__file__)
EXPERIMENT_FOLDER = pj(ROOT_DIR, "experiments")
TEMPLATE_FOLDER = pj(ROOT_DIR, "templates")
TEMPLATE_ENV = Environment(loader=FileSystemLoader(TEMPLATE_FOLDER))


@task
def my_special_function(name):
    """ This function will create 42 C files. """
    path = pj(EXPERIMENT_FOLDER, name)
    if not os.path.exists(path):
        os.makedirs(path)

    c_template = TEMPLATE_ENV.get_template("dummy_template.c")
    # We make the id start at 1 and finish at 42
    for value in range(1, 43):
        with open(pj(path, "dummy_%d.c" % value), "w") as f:
            f.write(c_template.render(my_value=value))

    # If you change the platform target and want to push to iotlab
    # don't forget to update the node names
    makefile_template = TEMPLATE_ENV.get_template("dummy_makefile")
    with open(pj(path, "Makefile"), "w") as f:
Example #38
 def _load_gml(name, filename):
     return ImportedGraphBuilder() \
         .set_name(name) \
         .import_gml(pj(Datasets.DATASETS_ROOT_PATH, name, filename)) \
         .build()
Example #39
 def _load_polbooks_or_football(name, nodes_path, edges_path):
     return ImportedGraphBuilder() \
         .set_name(name) \
         .import_nodes(pj(Datasets.DATASETS_ROOT_PATH, name, nodes_path), name_col_idx='idx', class_col_idx=2) \
         .import_edges(pj(Datasets.DATASETS_ROOT_PATH, name, edges_path)) \
         .build()
Example #40
class Datasets:
    DATASETS_ROOT_PATH = pj(os.path.dirname(os.path.abspath(__file__)), 'datasets')

    _lazy_datasets = {
        # 'as': lambda: Datasets._load_altsoph('as', 'as.clusters', 'as.edges'),  # TOO BIG
        # 'citeseer': lambda: Datasets._load_webkb_like('citeseer', 'citeseer.nodes', 'citeseer.edges'),  # BROKEN
        # 'cora_full': lambda: Datasets._load_altsoph('cora_full', '_old.clusters', '_old.edges'),  # TOO BIG
        'dolphins': lambda: Datasets._load_altsoph('dolphins', 'dolphins.clusters', 'dolphins.edges'),
        'eu-core': lambda: Datasets._load_altsoph('eu-core', 'eu-core.clusters', 'eu-core.edges'),
        'football': lambda: Datasets._load_gml('football', 'football.gml'),
        'karate': lambda: Datasets._load_altsoph('karate', 'karate.clusters', 'karate.edges'),
        'news_2cl_1': lambda: Datasets._load_newsgroup('news_2cl_1', 'news_2cl_1_classeo.csv', 'news_2cl_1_Docr.csv'),
        'news_2cl_2': lambda: Datasets._load_newsgroup('news_2cl_2', 'news_2cl_2_classeo.csv', 'news_2cl_2_Docr.csv'),
        'news_2cl_3': lambda: Datasets._load_newsgroup('news_2cl_3', 'news_2cl_3_classeo.csv', 'news_2cl_3_Docr.csv'),
        'news_3cl_1': lambda: Datasets._load_newsgroup('news_3cl_1', 'news_3cl_1_classeo.csv', 'news_3cl_1_Docr.csv'),
        'news_3cl_2': lambda: Datasets._load_newsgroup('news_3cl_2', 'news_3cl_2_classeo.csv', 'news_3cl_2_Docr.csv'),
        'news_3cl_3': lambda: Datasets._load_newsgroup('news_3cl_3', 'news_3cl_3_classeo.csv', 'news_3cl_3_Docr.csv'),
        'news_5cl_1': lambda: Datasets._load_newsgroup('news_5cl_1', 'news_5cl_1_classeo.csv', 'news_5cl_1_Docr.csv'),
        'news_5cl_2': lambda: Datasets._load_newsgroup('news_5cl_2', 'news_5cl_2_classeo.csv', 'news_5cl_2_Docr.csv'),
        'news_5cl_3': lambda: Datasets._load_newsgroup('news_5cl_3', 'news_5cl_3_classeo.csv', 'news_5cl_3_Docr.csv'),
        'polblogs': lambda: Datasets._load_gml('polblogs', 'polblogs.gml'),
        'polbooks': lambda: Datasets._load_gml('polbooks', 'polbooks.gml'),
        # 'webkb_cornel': lambda: Datasets._load_webkb('webkb_cornell', 'cornell/webkb-cornell.nodes',
        #                                              'cornell/webkb-cornell.edges'),  # POSSIBLY BROKEN
        # 'webkb_texas': lambda: Datasets._load_webkb('webkb_texas', 'texas/webkb-texas.nodes',
        #                                             'texas/webkb-texas.edges'),  # POSSIBLY BROKEN
        # 'webkb_washington': lambda: Datasets._load_webkb('webkb_washington', 'washington/webkb-washington.nodes',
        #                                                  'washington/webkb-washington.edges'),  # POSSIBLY BROKEN
        # 'webkb_wisconsin': lambda: Datasets._load_webkb('webkb_wisconsin', 'wisconsin/webkb-wisconsin.nodes',
        #                                                 'wisconsin/webkb-wisconsin.edges')  # POSSIBLY BROKEN
    }

    _loaded_datasets = {}

    @property
    def newsgroup(self):
        newsgroup_names = ['news_2cl_1', 'news_2cl_2', 'news_2cl_3',
                           'news_3cl_1', 'news_3cl_2', 'news_3cl_3',
                           'news_5cl_1', 'news_5cl_2', 'news_5cl_3']
        return [self[x] for x in newsgroup_names]

    @property
    def webkb(self):
        webkb_names = ['webkb_cornel', 'webkb_texas', 'webkb_washington', 'webkb_wisconsin']
        return [self[x] for x in webkb_names]

    @property
    def all(self):
        return [self[x] for x in self._lazy_datasets.keys()]

    @staticmethod
    def _load_altsoph(name, nodes_path, edges_path):
        return ImportedGraphBuilder() \
            .set_name(name) \
            .import_nodes(pj(Datasets.DATASETS_ROOT_PATH, name, nodes_path)) \
            .import_edges(pj(Datasets.DATASETS_ROOT_PATH, name, edges_path)) \
            .build()

    @staticmethod
    def _load_polbooks_or_football(name, nodes_path, edges_path):
        return ImportedGraphBuilder() \
            .set_name(name) \
            .import_nodes(pj(Datasets.DATASETS_ROOT_PATH, name, nodes_path), name_col_idx='idx', class_col_idx=2) \
            .import_edges(pj(Datasets.DATASETS_ROOT_PATH, name, edges_path)) \
            .build()

    @staticmethod
    def _load_polblogs_or_zachary(name, graph_path):
        return ImportedGraphBuilder() \
            .set_name(name) \
            .import_nodes_and_edges(pj(Datasets.DATASETS_ROOT_PATH, name, graph_path)) \
            .build()

    @staticmethod
    def _load_newsgroup(name, nodes_path, edges_path):
        return ImportedGraphBuilder() \
            .set_name(name) \
            .import_nodes(pj(Datasets.DATASETS_ROOT_PATH, 'newsgroup', nodes_path),
                          name_col_idx='idx', class_col_idx=0) \
            .import_adjacency_matrix(pj(Datasets.DATASETS_ROOT_PATH, 'newsgroup', edges_path)) \
            .build()

    @staticmethod
    def _load_webkb(name, nodes_path, edges_path):
        return ImportedGraphBuilder() \
            .set_name(name) \
            .import_nodes(pj(Datasets.DATASETS_ROOT_PATH, 'webkb', nodes_path),
                          startline=2, name_col_idx=0, class_col_idx=-1) \
            .import_edges(pj(Datasets.DATASETS_ROOT_PATH, 'webkb', edges_path),
                          startline=3, node1_col_idx=1, node2_col_idx=3) \
            .build()

    @staticmethod
    def _load_webkb_like(name, nodes_path, edges_path):
        return ImportedGraphBuilder() \
            .set_name(name) \
            .import_nodes(pj(Datasets.DATASETS_ROOT_PATH, name, nodes_path),
                          startline=2, name_col_idx=0, class_col_idx=-1) \
            .import_edges(pj(Datasets.DATASETS_ROOT_PATH, name, edges_path),
                          startline=3, node1_col_idx=1, node2_col_idx=3) \
            .build()

    @staticmethod
    def _load_gml(name, filename):
        return ImportedGraphBuilder() \
            .set_name(name) \
            .import_gml(pj(Datasets.DATASETS_ROOT_PATH, name, filename)) \
            .build()

    def __getitem__(self, item):
        if item not in self._loaded_datasets:
            self._loaded_datasets[item] = self._lazy_datasets[item]()
        return self._loaded_datasets[item]

    def __getattr__(self, name):
        return self[name]
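A brief usage sketch of the lazy registry above, assuming Datasets is instantiated somewhere as a module-level object: each graph is built on first access through _lazy_datasets, cached in _loaded_datasets, and attribute access falls through to item access:

datasets = Datasets()
football = datasets['football']   # built on first access, then cached
same = datasets.football          # __getattr__ delegates to __getitem__
news_graphs = datasets.newsgroup  # property collecting all newsgroup graphs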
Example #41
author_email = ''
url = 'http://openalea.gforge.inria.fr'
license = 'Cecill-C'

packages = [
    namespace + "." + pkg for pkg in find_packages('src')
    if 'vplants' not in pkg
]
print pkg_name
setup(
    name=name,
    version=version,
    description=description,
    author=author,
    author_email=author_email,
    url=url,
    license=license,
    namespace_packages=['vplants'],
    create_namespaces=True,
    zip_safe=False,
    packages=packages,
    package_dir={
        pkg_name: pj('src', 'tree_reduction'),
        '': 'src'
    },

    # Dependencies
    install_requires=['openalea.deploy'],
    dependency_links=['http://openalea.gforge.inria.fr/pi'],
)
Example #42
 def _load_polblogs_or_zachary(name, graph_path):
     return ImportedGraphBuilder() \
         .set_name(name) \
         .import_nodes_and_edges(pj(Datasets.DATASETS_ROOT_PATH, name, graph_path)) \
         .build()
Example #43
 def output_filename(self, filename):
     folder = re.search('shared.*(?=/)', filename).group(0)
     shortname = re.search('[^/]+$', filename).group(0)
     return pj(self.conf['dirs']['output'], folder), shortname
Example #44
 def _load_altsoph(name, nodes_path, edges_path):
     return ImportedGraphBuilder() \
         .set_name(name) \
         .import_nodes(pj(Datasets.DATASETS_ROOT_PATH, name, nodes_path)) \
         .import_edges(pj(Datasets.DATASETS_ROOT_PATH, name, edges_path)) \
         .build()
Example #45
        return int(tol[-1])


def xlat_pmax(pmax):
    if '/' in pmax:
        a = [float(x) for x in pmax.split("/")]
        return a[0] / a[1]
    else:
        return float(pmax)


pool_path = os.getenv("HORIZON_POOL")
if pool_path is None:
    raise IOError("need HORIZON_POOL")

base_path = pj(pool_path, "parts", "passive", "resistor", "samsung", "rc")

gen = util.UUIDGenerator("uu.txt")

pmaxs = set()
for row in j_raw["rows"]:
    mpn = row["parnum"]
    pkg = row["sizcd_eia"]
    rval = row["rval"][:-1]
    if rval[-1] in muls:
        value = float(rval[:-1]) * muls[rval[-1]]
    else:
        value = float(rval)
    tol = xlat_tol(row['tol'])

    pmax = row['ratpow']
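A quick sanity check of the xlat_pmax() helper above; the expected values follow directly from the code shown, not from the original source:

assert xlat_pmax("1/16") == 1.0 / 16
assert xlat_pmax("0.25") == 0.25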
Example #46
"""

from __future__ import absolute_import, division, print_function

import os
import sys
import tempfile
import unittest
from os.path import join as pj

from madmom.io.audio import *
from . import AUDIO_PATH, DATA_PATH
from .test_audio_comb_filters import sig_1d, sig_2d

sample_file = pj(AUDIO_PATH, 'sample.wav')
sample_file_22k = pj(AUDIO_PATH, 'sample_22050.wav')
stereo_sample_file = pj(AUDIO_PATH, 'stereo_sample.wav')
tmp_file = tempfile.NamedTemporaryFile(delete=False).name


class TestLoadWaveFileFunction(unittest.TestCase):

    def test_types(self):
        signal, sample_rate = load_wave_file(sample_file)
        self.assertIsInstance(signal, np.ndarray)
        self.assertTrue(signal.dtype == np.int16)
        self.assertTrue(type(sample_rate) == int)

    def test_file_handle(self):
        # open file handle
Example #47
This file contains test functions for the madmom.utils module.

"""

from __future__ import absolute_import, division, print_function

import unittest
from os.path import join as pj

from madmom.utils import *
from . import (ACTIVATIONS_PATH, ANNOTATIONS_PATH, AUDIO_PATH, DATA_PATH,
               DETECTIONS_PATH)
from .test_evaluation_notes import ANNOTATIONS as NOTES

FILE_LIST = [
    pj(DATA_PATH, 'README'),
    pj(DATA_PATH, 'commented_txt'),
    pj(DATA_PATH, 'events.txt')
]

AUDIO_FILES = [
    pj(AUDIO_PATH, 'sample.wav'),
    pj(AUDIO_PATH, 'sample2.wav'),
    pj(AUDIO_PATH, 'sample_22050.wav'),
    pj(AUDIO_PATH, 'stereo_chirp.wav'),
    pj(AUDIO_PATH, 'stereo_chirp_rg.flac'),
    pj(AUDIO_PATH, 'stereo_sample.flac'),
    pj(AUDIO_PATH, 'stereo_sample.m4a'),
    pj(AUDIO_PATH, 'stereo_sample_rg.flac'),
    pj(AUDIO_PATH, 'stereo_sample.wav')
]
Example #48
def mobileNetV2_tst():
    with tf.Graph().as_default():
        is_training = tf.placeholder(tf.bool, [])
        url = 'https://upload.wikimedia.org/wikipedia/commons/7/70/EnglishCockerSpaniel_simon.jpg'
        image_string = urllib.urlopen(url).read()
        image = tf.image.decode_jpeg(image_string, channels=3)
        processed_image = inception_preprocessing.preprocess_image(
            image, image_size, image_size, is_training=False)
        processed_images = tf.expand_dims(processed_image, 0)
        # print(processed_images.shape)
        print(processed_images.dtype)

        # Create the model, use the default arg scope to configure the batch norm parameters.
        with slim.arg_scope(mobilenet_v2.training_scope(is_training=False)):
            global_pool, endpoints = mobilenet_v2.mobilenet(processed_images,
                                                            num_classes=None)
        # print(type(endpoints))
        # print(len(endpoints.keys()))
        # print(endpoints['layer_18/output'].shape) # (1, 7, 7, 320)
        # print(endpoints['layer_18'].shape) # (1, 7, 7, 320)
        # print(endpoints['layer_19'].shape) # (1, 7, 7, 320)
        # print(endpoints['global_pool'].shape) # (1, 1, 1, 1280)
        # pprint(endpoints.keys())

        # variables_to_restore = slim.get_variables_to_restore(exclude=['MobilenetV2/Logits/Conv2d_1c_1x1'])
        variables_to_restore = slim.get_variables_to_restore()
        print(len(variables_to_restore))  # 260
        restorer = tf.train.Saver(variables_to_restore)

        # print(variables_to_restore)

        dropout_keep_prob = 0.5
        n_classes = 2
        weight_decay = 0.05
        with tf.variable_scope('addition', 'fc'):
            # flatten = tf.flatten(endpoints['global_pool'])
            flatten = slim.flatten(global_pool)
            with slim.arg_scope(
                [slim.fully_connected],
                    weights_regularizer=slim.l2_regularizer(weight_decay),
                    weights_initializer=tc.layers.xavier_initializer(
                        tf.float32),
                    # weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                    activation_fn=None) as sc:
                net = slim.fully_connected(flatten,
                                           128,
                                           activation_fn=None,
                                           scope='fc1')
                net = slim.dropout(net,
                                   dropout_keep_prob,
                                   is_training=is_training,
                                   scope='dropout')
                logits = slim.fully_connected(net,
                                              n_classes,
                                              activation_fn=None,
                                              scope='fc2')

        # with tf.name_scope('loss') :

        probabilities = tf.nn.softmax(logits)

        ckptDir = r'D:\Lab408\tfslim\mobileNetV2'
        ckptPath = pj(ckptDir, 'mobilenet_v2_1.0_224.ckpt')

        # variables_to_restore = slim.get_variables('MobilenetV2/Logits/Conv2d_1c_1x1')
        # variables_to_save = slim.get_variables_to_restore(exclude=['MobilenetV2/Logits/Conv2d_1c_1x1'])
        variables_to_save = slim.get_variables_to_restore()
        print(len(variables_to_save))  # 264: the two fc layers add 4 parameters
        saver = tf.train.Saver(variables_to_save)

        # print(variables_to_save)
        # pprint(variables_to_restore)
        # variables_to_restore = slim.get_model_variables()
        # print(len(variables_to_restore))

        op_init1 = tf.variables_initializer(tf.global_variables())
        op_init2 = tf.variables_initializer(tf.local_variables())
        op_group = tf.group(op_init1, op_init2)

        saveto = r'D:\Lab408\tfslim\mobileNetV2-finetue\a2'
        # ckptPath = saveto
        # variables_to_restore = variables_to_save
        saveto = r'D:\Lab408\tfslim\mobileNetV2-finetue\a3'
        init_fn0 = slim.assign_from_checkpoint_fn(ckptPath,
                                                  variables_to_restore)
        sess_conf = tf.ConfigProto()
        sess_conf.gpu_options.allow_growth = True
        with tf.Session(config=sess_conf) as sess:
            sess.run(op_group)  # the fc parameters need to be initialized
            init_fn0(sess)
            np_image, probabilities = sess.run([image, probabilities],
                                               feed_dict={is_training: False})
            probabilities = probabilities[0, 0:]
            sorted_inds = [
                i[0]
                for i in sorted(enumerate(-probabilities), key=lambda x: x[1])
            ]
            # restorer.save(sess, saveto)
            saver.save(sess, saveto)

        names = imagenet.create_readable_names_for_imagenet_labels()
        for i in range(n_classes):
            index = sorted_inds[i]
            outstr = 'Probability {:.2%} => {}'.format(probabilities[index],
                                                       names[index])
            print(outstr)
            # print('Probability %0.2f%% => [%s]' % (probabilities[index] * 100, names[index]))
        # Probability 62.93% => tench, Tinca tinca
        # Probability 37.07% => background
        return
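
The variable counts above (260 restored vs. 264 saved) only come out right because get_variables_to_restore() is called before the 'addition' head is built. A minimal sketch of a more explicit alternative, reusing the graph above (slim, tf and the 'addition' scope are assumed to already exist; checkpoint and save paths are placeholders), splits the variables by scope instead of by call order:

# Sketch only: scope-based split instead of relying on call order.
backbone_vars = slim.get_variables_to_restore(exclude=['addition'])
head_vars = slim.get_variables(scope='addition')

restore_fn = slim.assign_from_checkpoint_fn(
    'mobilenet_v2_1.0_224.ckpt',   # pretrained backbone weights (placeholder path)
    backbone_vars)
full_saver = tf.train.Saver(backbone_vars + head_vars)  # backbone + new fc head

with tf.Session() as sess:
    sess.run(tf.variables_initializer(head_vars))  # only the new head needs init
    restore_fn(sess)                               # backbone comes from the checkpoint
    full_saver.save(sess, 'finetune_start')        # placeholder save path
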
Beispiel #49
0
def test_write_and_read_events(self):
    write_events(EVENTS, pj(DATA_PATH, 'events.txt'))
    annotations = load_events(pj(DATA_PATH, 'events.txt'))
    self.assertTrue(np.allclose(annotations, EVENTS))
Beispiel #50
0
def test_errors(self):
    with self.assertRaises(IOError):
        search_path(pj(DATA_PATH, 'README'))
Beispiel #51
0
def test_write_events_to_file(self):
    result = write_events(EVENTS, pj(DATA_PATH, 'events.txt'))
    self.assertEqual(EVENTS, result)
Beispiel #52
0
def get_profile_folder_by_name(self, profname=None):
    if profname is None:
        profname = self.conf['prof']['name']
    return pj(self.conf['prof']['basedir'], profname)
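
For orientation, a hypothetical call of this helper (the conf layout and paths are invented for illustration; pj is os.path.join as in the other examples):

conf = {'prof': {'name': 'default', 'basedir': '/home/user/profiles'}}
pj(conf['prof']['basedir'], conf['prof']['name'])  # -> '/home/user/profiles/default'
pj(conf['prof']['basedir'], 'work')                # -> '/home/user/profiles/work'
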
Beispiel #53
0
def test_load_file_with_comments_and_empty_lines(self):
    events = load_events(pj(DATA_PATH, 'commented_txt'))
    self.assertTrue(np.allclose(events, [1.1, 2.1]))
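
The comment/blank-line handling this test checks can be mimicked with plain Python; the stand-in below is only a sketch of the expected behaviour (one event time per line, '#' comments and empty lines ignored), not the library's actual implementation:

import numpy as np

def load_events_sketch(path, comment='#'):
    """Toy stand-in for load_events: one event time per line."""
    values = []
    with open(path) as f:
        for line in f:
            line = line.split(comment, 1)[0].strip()  # strip comments and whitespace
            if line:
                values.append(float(line.split()[0]))  # keep only the first column
    return np.asarray(values)

On a file containing the times 1.1 and 2.1 plus comments and blank lines, this returns the same [1.1, 2.1] the test expects.
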
Beispiel #54
0
def test_write_events_to_file_handle(self):
    file_handle = open(pj(DATA_PATH, 'events.txt'), 'wb')
    result = write_events(EVENTS, file_handle)
    self.assertEqual(EVENTS, result)
    file_handle.close()
Beispiel #55
0
def test_read_events_from_file_handle(self):
    file_handle = open(pj(DATA_PATH, 'events.txt'))
    events = load_events(file_handle)
    self.assertIsInstance(events, np.ndarray)
    file_handle.close()
Beispiel #56
0
def test_load_only_timestamps(self):
    events = load_events(pj(ANNOTATIONS_PATH, 'stereo_sample.notes'))
    self.assertTrue(
        np.allclose(
            events,
            [0.147, 1.567, 2.526, 2.549, 2.563, 2.577, 3.369, 3.449]))
Beispiel #57
0
def main():
    cwd = cf.cwd
    log_dir = pj(cwd, 'train_eval_log')
    ckpt_dir = path.join(log_dir, 'ckpts')

    videoRoot = cf.videoRoot
    # videoRoot = pj(cwd, 'all_data')
    labeljson = cf.labeljson
    evalSet = cf.evalSet
    # evalSet = [47, 48, 49, 51, 52, 59, 61, 62, 63, 65]
    # 47: {'fire': 1601, 'fireless': 57},
    # 48: {'fire': 3748, 'fireless': 98},
    # 49: {'fire': 3714, 'fireless': 40},
    # 51: {'fire': 4120, 'fireless': 21},
    # 52: {'fire': 4451, 'fireless': 45},
    # 59: {'fire': 6911, 'fireless': 70},
    # 61: {'fire': 1298, 'fireless': 0},
    # 62: {'fire': 3275, 'fireless': 0},
    # 63: {'fire': 5055, 'fireless': 0},
    # 65: {'fire': 6913, 'fireless': 64},
    if not path.exists(log_dir):
        os.mkdir(log_dir)
    if not path.exists(ckpt_dir):
        os.mkdir(ckpt_dir)

    eval_interval = 20
    save_summary_steps = 5
    save_ckpts_steps = 100
    train_batchsz = 50
    eval_batchsz = 100
    # eval_steps = 40
    epoch = 900
    img_num = 870 * 2
    max_steps = (img_num * epoch) // train_batchsz

    # ------------------------------ prepare input ------------------------------
    _h = 240
    _w = 320
    dset = cnn_dataset.MyDataset(videoRoot,
                                 labeljson,
                                 evalSet,
                                 resize=(_h, _w))
    dset.setTrainParams(train_batchsz, prefetch=10)
    dset.setEvalParams(eval_batchsz, prefetch=3)
    iter_dict = {'train': dset.makeTrainIter(), 'eval': dset.makeEvalIter()}

    holder_handle = tf.placeholder(tf.string, [])
    iter = tf.data.Iterator.from_string_handle(holder_handle,
                                               dset.output_types)
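    # (the handle-switching mechanism used here is sketched after this function)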
    # next_elem = iter.get_next()
    inputx, labels, filename = iter.get_next()
    inputx = tf.reshape(inputx, [-1, _h, _w, 3])
    # eval_x.set_shape([eval_batchsz, 200, 250, 3])

    # train_x, train_y, train_fname = dset.train(train_batchsz, prefetch_batch)
    # train_x.set_shape([train_batchsz, 200, 250, 3])
    # eval_x, eval_y, eval_fname = dset.eval(eval_batchsz, prefetch_batch)
    # eval_x.set_shape([eval_batchsz, 200, 250, 3])

    # \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\ build graph \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\
    model = cnn.CNN('NCHW')
    # model = smodel.Simplest('NHWC')
    logits = model(inputx, castFromUint8=False)
    with tf.name_scope('cross_entropy'):
        loss = tf.losses.sparse_softmax_cross_entropy(labels, logits)

    with tf.name_scope('accuracy'):
        _pred = tf.argmax(logits, axis=1, output_type=tf.int32)
        acc_vec = tf.equal(labels, _pred)
        acc = tf.reduce_mean(tf.cast(acc_vec, tf.float32))

    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.name_scope('optimizer'):
        optimizer = tf.train.AdamOptimizer(1e-4)
        with tf.control_dependencies(update_ops):
            train_op = optimizer.minimize(loss,
                                          tf.train.get_or_create_global_step())

    # ||||||||||||||||||||||||||||||  hooks ||||||||||||||||||||||||||||||
    # >>>  logging
    tf.logging.set_verbosity(tf.logging.INFO)
    # global_step = tf.train.get_or_create_global_step()
    # tf.identity(global_step, 'g_step')
    # tf.identity(loss, 'cross_entropy')
    # tf.identity(acc, 'accuracy')
    # tensor_lr = optimizer._lr_t

    tensors = {
        'step': tf.train.get_or_create_global_step().name,
        'loss': loss.name,
        'accuracy': acc.name
    }
    logging_hook = tf.train.LoggingTensorHook(tensors=tensors, every_n_iter=10)

    # >>>  summary
    summary_conf = {'dir': log_dir, 'saved_steps': save_summary_steps}
    summary_protobuf = {
        'loss': tf.summary.scalar('cross_entropy', loss),
        'accuracy': tf.summary.scalar('accuracy', acc)
    }
    # print(list(summary_protobuf.values()))
    # summary_loss = tf.summary.scalar('cross_entropy', loss)
    # summary_acc = tf.summary.scalar('accuracy', acc)
    # summary_lr = tf.summary.scalar('lr', optimizer._lr_t)
    # global_step =
    # merged_op = tf.summary.merge_all()
    # summary_hook = tf.train.SummarySaverHook(
    #   save_steps=1,
    #   output_dir= ckpt_dir,
    #   summary_op= merged_op
    # )

    # >>> main run hook
    eval_hook = runhooks.RunHook(
        iter_dict=iter_dict,
        eval_steps=eval_interval,
        train_op=train_op,
        training=model.is_training,
        holder_handle=holder_handle,
        summary_conf=summary_conf,
        summary_protobuf=summary_protobuf,
    )

    # >>>  checkpoint saver
    ckpt_saver_hook = runhooks.CkptSaverHook(ckpt_dir,
                                             save_steps=save_ckpts_steps)
    # ckpt_saver_hook = tf.train.CheckpointSaverHook(
    #   checkpoint_dir= ckpt_dir,
    #   save_steps= save_ckpts_steps,
    # )

    all_hooks = [
        # logging_hook,
        # summary_hook,
        eval_hook,
        ckpt_saver_hook,
        # tf.train.StopAtStepHook(max_steps),
        # tf.train.NanTensorHook(loss)
    ]

    # ////////////////////////////// session config //////////////////////////////
    sess_conf = tf.ConfigProto()
    sess_conf.gpu_options.allow_growth = True
    # sess_conf.gpu_options.per_process_gpu_memory_fraction = 0.9

    sess_creator = tf.train.ChiefSessionCreator(
        # scaffold=scaffold,
        # master='',
        config=sess_conf,
        checkpoint_dir=ckpt_dir)
    # print('end')
    # return

    # ------------------------------  start  ------------------------------
    with tf.train.MonitoredSession(session_creator=sess_creator,
                                   hooks=all_hooks,
                                   stop_grace_period_secs=3600) as mon_sess:
        while not mon_sess.should_stop():
            # the real fetches/feeds are supplied by the RunHook's before_run()
            step = mon_sess.run(tf.train.get_global_step())
            # training, step = mon_sess.run([model.is_training, tf.train.get_global_step()]) # arg from retval of _EvalHook before_run()
            # if not training:
            # print('step {}: eval xxxxxxxxx'.format(step))
            # print(lr)
    return
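
The train/eval switching above hinges on tf.data.Iterator.from_string_handle plus the custom RunHook that feeds the right handle. A minimal, self-contained sketch of just the handle mechanism (TF 1.x, toy datasets, no hooks; all names are illustrative):

import tensorflow as tf

train_ds = tf.data.Dataset.range(10).batch(2)
eval_ds = tf.data.Dataset.range(100, 105).batch(5)

handle_ph = tf.placeholder(tf.string, [])
iterator = tf.data.Iterator.from_string_handle(
    handle_ph, train_ds.output_types, train_ds.output_shapes)
next_batch = iterator.get_next()

train_iter = train_ds.make_one_shot_iterator()
eval_iter = eval_ds.make_initializable_iterator()

with tf.Session() as sess:
    train_handle, eval_handle = sess.run(
        [train_iter.string_handle(), eval_iter.string_handle()])
    sess.run(eval_iter.initializer)
    print(sess.run(next_batch, feed_dict={handle_ph: train_handle}))  # [0 1]
    print(sess.run(next_batch, feed_dict={handle_ph: eval_handle}))   # [100 ... 104]

Whichever handle is fed decides which dataset the shared iterator reads from; in main() that choice is delegated to the custom RunHook (see the before_run() comment in the training loop).
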
Beispiel #58
0
def test_read_onset_annotations(self):
    events = load_events(pj(ANNOTATIONS_PATH, 'sample.onsets'))
    self.assertTrue(np.allclose(events, ONSET_ANNOTATIONS))
Beispiel #59
0
import time
import sys
import os
import os.path as path
from os.path import join as pj
import tensorflow as tf

import var_config as cf
import cnn_dataset
import cnn
repoRoot = r'E:\github_repo\tensorflow_intro_practice'
sys.path.append(pj(repoRoot, 'proj', 'needle_mushroom'))
import runhooks


def main():
    cwd = cf.cwd
    log_dir = pj(cwd, 'train_eval_log')
    ckpt_dir = path.join(log_dir, 'ckpts')

    videoRoot = cf.videoRoot
    # videoRoot = pj(cwd, 'all_data')
    labeljson = cf.labeljson
    evalSet = cf.evalSet
    # evalSet = [47, 48, 49, 51, 52, 59, 61, 62, 63, 65]
    # 47: {'fire': 1601, 'fireless': 57},
    # 48: {'fire': 3748, 'fireless': 98},
    # 49: {'fire': 3714, 'fireless': 40},
    # 51: {'fire': 4120, 'fireless': 21},
    # 52: {'fire': 4451, 'fireless': 45},
    # 59: {'fire': 6911, 'fireless': 70},
Beispiel #60
0
def test_read_events_from_file(self):
    events = load_events(pj(DATA_PATH, 'events.txt'))
    self.assertIsInstance(events, np.ndarray)