def start(self):
        """
          Starts the server. Call this after the handlers have been set.
        """

        import utils
        utils.makedirs(SOCKET_PATH)
        serversock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        sockfile = os.path.join(SOCKET_PATH, DISPLAY)
        try:
            serversock.bind(sockfile)
        except socket.error:
            try:
                os.remove(sockfile)
            except OSError:
                log("Couldn't remove dead socket file \"%s\"." % sockfile)
                sys.exit(1)
            try:
                serversock.bind(sockfile)
            except socket.error:
                log("Couldn't bind to socket. Aborting.")
                sys.exit(1)

        serversock.listen(3)
        serversock.setblocking(False)

        atexit.register(self.__shutdown, serversock, sockfile)

        gobject.timeout_add(100, self.__server_handler, serversock, sockfile)
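All of the snippets on this page call a project-local makedirs helper (utils.makedirs or a bare makedirs import) rather than os.makedirs directly, and the helpers differ between projects: most simply tolerate an already existing directory, while a few (see the populate_relation example further down) take a list of path components and return the joined path. A minimal sketch of the common single-path variant, assuming it only needs to ignore "already exists" errors, could be:

import errno
import os

def makedirs(path):
    # Create path (and any missing parents); do nothing if it already exists.
    try:
        os.makedirs(path)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise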
Example #2
    def generate(self, in_base_path, out_base_path):
        self.in_base_path = in_base_path
        self.out_base_path = out_base_path

        utils.makedirs(out_base_path)
        imgutils.init(in_base_path)
        utils.init(in_base_path)

        self.blog = Struct(json.load(utils.open_file(self.in_base_path + "/blog.json")))

        # copy static content
        cmd = "cp -rf " + in_base_path + "/static/* " + out_base_path
        print("copy static content: " + cmd)
        proc = utils.execute_shell(cmd)

        # 'dynamic' content
        for c in ["sticky", "posts"]:
            setattr(self, c, [])
            self.generate_content(c)

        # home page
        self.generate_home()

        # feed
        self.generate_feed()
    def save_lyric(self, data, sid, name, artist):
        save_path = os.path.expanduser(config.get("lyrics", "save_lrc_path"))
        if not os.path.exists(save_path):
            utils.makedirs(save_path)

        try:
            lrc = data['lrc']['lyric']
        except (KeyError, TypeError):
            lrc = "[00:00.00] " + name + ' - ' + artist + "\n[99:59:99] No lyric found\n"
        # deepin music does not seem to support tlyric; tlyric is probably the
        # translation of the (English) lyrics. Ideally the original lyric and the
        # translation would be merged.
        #try:
            #tlyric = data['tlyric']['lyric']
        #except:
            #tlyric = None
        #try:
            #klyric = data['klyric']['lyric']
        #except:
            #klyric = None
        #lrc_content = klyric or lrc or tlyric
        lrc_content = lrc
        lrc_path = os.path.join(save_path, str(sid)+'.lrc')
        if not os.path.exists(lrc_path) and lrc_content:
            with open(lrc_path, 'w') as f:
                f.write(str(lrc_content))

        return lrc_path
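The comment above notes that it would be nicer to merge the original lyric with its translation (tlyric). A rough sketch of such a merge, assuming both strings use the usual [mm:ss.xx] LRC timestamps (merge_lyrics and _TIMESTAMP are illustrative names, not part of the project):

import re

_TIMESTAMP = re.compile(r'^(\[\d{2}:\d{2}[.:]\d{2}\])(.*)$')

def merge_lyrics(lrc, tlyric):
    # Collect translated lines keyed by timestamp, then append each one to the
    # matching original line.
    translations = {}
    for line in tlyric.splitlines():
        m = _TIMESTAMP.match(line)
        if m and m.group(2).strip():
            translations[m.group(1)] = m.group(2).strip()
    merged = []
    for line in lrc.splitlines():
        m = _TIMESTAMP.match(line)
        if m and m.group(1) in translations:
            merged.append('%s%s / %s' % (m.group(1), m.group(2).strip(), translations[m.group(1)]))
        else:
            merged.append(line)
    return '\n'.join(merged)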
def write_tiff( hf, filename):
    #Make the directory path if it doesn't exist
    dirname  = os.path.dirname(filename)
    if not os.path.exists(dirname):
       makedirs(dirname)
       
    import struct
    #Get the GTiff driver
    driver = gdal.GetDriverByName("GTiff")

    #Create the dataset
    ds = driver.Create(filename, hf.width, hf.height, 1, gdal.GDT_Float32)
            
    band = ds.GetRasterBand( 1 )
    
    for r in range(0, hf.height):
        scanline = []
        for c in range(0, hf.width):
            height = hf.get_height(c,r)
            scanline.append( height )
        packed_data = struct.pack('f'*len(scanline), *scanline)
        line = hf.height-r-1
        band.WriteRaster(0, line, band.XSize, 1, packed_data, buf_type=gdal.GDT_Float32)
    ds.FlushCache()
    ds = None
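write_tiff only assumes hf exposes width, height and get_height(col, row), so a tiny stand-in heightfield is enough to exercise it (the class and output path below are purely illustrative; gdal and makedirs come from the original module's imports):

class RampHeightField(object):
    # Illustrative stand-in: a 4x3 grid whose height is simply column + row.
    width = 4
    height = 3

    def get_height(self, c, r):
        return float(c + r)

write_tiff(RampHeightField(), "/tmp/heightfield_demo/heights.tif")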
def setup_spy():
    # Nothing to do if already in LD_PRELOAD
    if "police-hook.so" in os.environ.get("LD_PRELOAD", ""):
        return

    # Setup environment
    os.environ["LD_PRELOAD"] = "police-hook.so"
    ldlibpath = [
        os.environ.get("LD_LIBRARY_PATH", ""),
        os.path.join(dragon.POLICE_HOME, "hook", "lib32"),
        os.path.join(dragon.POLICE_HOME, "hook", "lib64"),
    ]
    os.environ["LD_LIBRARY_PATH"] = ":".join(ldlibpath)
    os.environ["POLICE_HOOK_LOG"] = dragon.POLICE_SPY_LOG
    os.environ["POLICE_HOOK_RM_SCRIPT"] = os.path.join(
            dragon.POLICE_HOME, "police-rm.sh")
    os.environ["POLICE_HOOK_NO_ENV"] = "1"

    # Setup directory
    utils.makedirs(os.path.dirname(dragon.POLICE_SPY_LOG))

    # Keep previous logs. Spy will append to existing if any
    # If a fresh spy is required, user shall clean it before
    if not os.path.exists(dragon.POLICE_SPY_LOG):
        fd = open(dragon.POLICE_SPY_LOG, "w")
        fd.close()
Example #6
    def install_addon(self, path):
        """Installs the given addon or directory of addons in the profile."""

        # if the addon is a directory, install all addons in it
        addons = [path]
        if not path.endswith('.xpi') and not os.path.exists(os.path.join(path, 'install.rdf')):
            addons = [os.path.join(path, x) for x in os.listdir(path)]
           
        for addon in addons:
            if addon.endswith('.xpi'):
                tmpdir = tempfile.mkdtemp(suffix = "." + os.path.split(addon)[-1])
                compressed_file = zipfile.ZipFile(addon, "r")
                for name in compressed_file.namelist():
                    if name.endswith('/'):
                        makedirs(os.path.join(tmpdir, name))
                    else:
                        if not os.path.isdir(os.path.dirname(os.path.join(tmpdir, name))):
                            makedirs(os.path.dirname(os.path.join(tmpdir, name)))
                        data = compressed_file.read(name)
                        f = open(os.path.join(tmpdir, name), 'wb')
                        f.write(data) ; f.close()
                addon = tmpdir

            # determine the addon id
            addon_id = Profile.addon_id(addon)
            assert addon_id is not None, "The addon id could not be found: %s" % addon
 
            # copy the addon to the profile
            addon_path = os.path.join(self.profile, 'extensions', addon_id)
            copytree(addon, addon_path, preserve_symlinks=1)
            self.addons_installed.append(addon_path)
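The manual namelist loop above reimplements what zipfile.ZipFile.extractall already does; on any reasonably recent Python the unpacking step could be collapsed to something like this (extract_xpi is an illustrative name, not part of the original code):

import os
import tempfile
import zipfile

def extract_xpi(addon):
    # Unpack an .xpi (a plain zip archive) into a fresh temporary directory.
    tmpdir = tempfile.mkdtemp(suffix="." + os.path.split(addon)[-1])
    with zipfile.ZipFile(addon, "r") as zf:
        zf.extractall(tmpdir)
    return tmpdir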
def build_xsl_reports( 
          locate_root_dir
        , tag
        , expected_results_file
        , failures_markup_file
        , comment_file
        , results_dir
        , result_file_prefix
        , dont_collect_logs = 0
        , reports = report_types
        , warnings = []
        , user = None
        , upload = False
        ):

    run_date = time.strftime( '%Y-%m-%dT%H:%M:%SZ', time.gmtime() )

    root_paths.append( locate_root_dir )
    root_paths.append( results_dir )
    
    bin_boost_dir = os.path.join( locate_root_dir, 'bin', 'boost' )
    
    output_dir = os.path.join( results_dir, result_file_prefix )
    utils.makedirs( output_dir )
    
    if expected_results_file != '':
        expected_results_file = os.path.abspath( expected_results_file )
    else:
        expected_results_file = os.path.abspath( map_path( 'empty_expected_results.xml' ) )


    extended_test_results = os.path.join( output_dir, 'extended_test_results.xml' )
        
    execute_tasks(
          tag
        , user
        , run_date
        , comment_file
        , results_dir
        , output_dir
        , reports
        , warnings
        , extended_test_results
        , dont_collect_logs
        , expected_results_file
        , failures_markup_file
        )

    if upload:
        upload_dir = 'regression-logs/'
        utils.log( 'Uploading results into "%s" [connecting as %s]...' % ( upload_dir, user ) )
        
        archive_name = '%s.tar.gz' % result_file_prefix
        utils.tar( 
              os.path.join( results_dir, result_file_prefix )
            , archive_name
            )
        
        utils.sourceforge.upload( os.path.join( results_dir, archive_name ), upload_dir, user )
        utils.sourceforge.untar( os.path.join( upload_dir, archive_name ), user, background = True )
Example #8
    def _import_path(self, src_path):
        src_path_sha = sha1_for_path(src_path)
        dst_path = self._path_for_file_with_sha(src_path, src_path_sha)
        dst_path_gz = dst_path+'.gz'

        # TODO: this is lame
        if os.path.exists(dst_path):
            return (dst_path, os.path.getsize(dst_path))
        elif os.path.exists(dst_path_gz):
            return (dst_path_gz, os.path.getsize(dst_path_gz))

        # gzip the file first, and see if it passes the compression
        # threshold

        makedirs(os.path.dirname(dst_path))
        mygzip(src_path, dst_path_gz)
        src_size = os.path.getsize(src_path)
        dst_gz_size = os.path.getsize(dst_path_gz)
        if float(dst_gz_size) / src_size <= self.config.gzip_threshhold:
            final_dst_path = dst_path_gz
            final_dst_size = dst_gz_size
        else:
            final_dst_path = dst_path
            final_dst_size = src_size
            copyfile(src_path, dst_path)
            os.remove(dst_path_gz)

        logging.info("Imported %s --> %s" % (src_path, final_dst_path))
        return (final_dst_path, final_dst_size)
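_import_path relies on a mygzip(src, dst) helper that writes a gzip-compressed copy of the source file. A plausible sketch, assuming that is all it needs to do:

import gzip
import shutil

def mygzip(src_path, dst_path):
    # Stream src_path through gzip compression into dst_path.
    with open(src_path, 'rb') as src, gzip.open(dst_path, 'wb') as dst:
        shutil.copyfileobj(src, dst)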
Example #9
def _create_store(prefix):

    r1 = random.randrange(0xffffff, 0x7fffffff)
    r2 = random.randrange(0xffffff, 0x7fffffff)
    fname1 = ".!pwstore" + str(r1)
    fname2 = ".!" + str(r2)
    utils.makedirs(os.path.join(prefix, fname1))
    path = os.path.join(prefix, fname1, fname2)

    # Equivalent to the commented-out loop below (and to
    # [chr(a) for a in range(256)*16 if a!=26]): 16 copies of every byte
    # value except 26, built with a list repetition instead of a loop.
    chars = [chr(a) for a in range(256) if a != 26] * 16
    #chars = []
    #for i in xrange(4096):
    #    a = i % 256
    #    if (a == 26): continue
    #    chars.append(chr(a))
    #end for

    data = ""
    while chars:
        index = random.randrange(len(chars))
        c = chars.pop(index)
        data += c
    #end while

    fd = open(path, "w")
    fd.write(data)
    fd.close()
    os.chmod(path, 0o400)
Example #10
    def init_create(cls, source_path, deck_path):
        if exists(deck_path) and \
               (not isdir(deck_path) or len(os.listdir(deck_path)) != 0):
            raise Error("`%s' exists and is not an empty directory" % deck_path)

        storage = DeckStorage(deck_path)
        makedirs(deck_path)
        storage.create(source_path)

        if os.geteuid() == 0:
            mounts = Mounts(source_path)
            if len(mounts):
                id = deckcache.new_id()
                deckcache.blob(id, "w").write(str(mounts))

                storage.mounts_id = id
            elif storage.mounts_id:
                # make an identical copy of the blob
                newid = deckcache.new_id()
                print(deckcache.blob(storage.mounts_id).read(), file=deckcache.blob(newid, "w"))
                storage.mounts_id = newid

        deck = cls(deck_path)
        deck.mount()

        return deck
Example #11
    def create(self, source_path):
        if self.exists():
            raise Error("deck `%s' already exists" % self.name)

        if not isdir(source_path):
            raise Error("source `%s' is not a directory" % source_path)

        if is_deck(source_path):
            source = Deck(source_path)
            if source.storage.paths.path != self.paths.path:
                raise Error("cannot branch a new deck from a deck in another directory")

            levels = source.storage.get_levels()
            makedirs(self.stack_path)
            os.symlink(levels[0], join(self.stack_path, "0"))

            # we only need to clone last level if the deck is dirty
            if source.is_dirty():
                levels = levels[1:]
                source.add_level()
            else:
                levels = levels[1:-1] 
                
            for level in levels:
                level_id = basename(level)
                self.add_level(level_id)

            self.mounts_id = source.storage.mounts_id
        else:
            makedirs(self.stack_path)
            os.symlink(realpath(source_path), join(self.stack_path, "0"))

        self.add_level()
Example #12
    def create_gdk_pixbuf_loaders_setup(self):
        modulespath = self.project.get_bundle_path("Contents/Resources/lib/gtk-2.0/" +
                                                   "${pkg:gtk+-2.0:gtk_binary_version}/"+
                                                   "loaders")
        modulespath = utils.evaluate_pkgconfig_variables (modulespath)

        cmd = "GDK_PIXBUF_MODULEDIR=" + modulespath + " gdk-pixbuf-query-loaders"
        f = os.popen(cmd)

        path = self.project.get_bundle_path("Contents/Resources/etc/gtk-2.0")
        utils.makedirs(path)
        fout = open(os.path.join(path, "gdk-pixbuf.loaders"), "w")

        prefix = "\"" + self.project.get_bundle_path("Contents/Resources")

        for line in f:
            line = line.strip()
            if line.startswith("#"):
                continue

            # Replace the hardcoded bundle path with @executable_path...
            if line.startswith(prefix):
                line = line[len(prefix):]
                line = "\"@executable_path/../Resources" + line
            fout.write(line)
            fout.write("\n")
        fout.close()
Example #13
 def convert(self, acquisition, participantName):
     cmdTpl = self.mrconvertTpl[acquisition.getDataType()]
     dicomsDir = acquisition.getDicomsDir()
     outputDir = acquisition.getOutputDir()
     utils.makedirs(outputDir)
     outputWithoutExt = acquisition.getOutputWithoutExt()
     call(cmdTpl.format(dicomsDir, outputWithoutExt), shell=True)
Example #14
    def __init__(self, path, architecture, cpp_opts=[]):
        self.base = path
        makedirs(self.base)

        cpp_opts.append("-I" + self.base)
        cpp_opts.append("-D%s=y" % architecture.upper())
        self.cpp_opts = cpp_opts
Example #15
    def __init__(self, basedir, serial, name):
        self.path = os.path.join(basedir, str(serial))
        self.serial = serial
        self.name = name

        # check profile version, if not a new device
        if os.path.isdir(self.path):
            if self.version < self.PROFILE_VERSION:
                raise Device.ProfileVersionMismatch("Version on disk is too old")
            elif self.version > self.PROFILE_VERSION:
                raise Device.ProfileVersionMismatch("Version on disk is too new")

        # create directories
        utils.makedirs(self.path)
        for directory in DIRECTORIES:
            directory_path = os.path.join(self.path, directory)
            utils.makedirs(directory_path)

        # write profile version (if none)
        path = os.path.join(self.path, self.PROFILE_VERSION_FILE)
        if not os.path.exists(path):
            with open(path, 'w') as f:
                f.write(str(self.PROFILE_VERSION))

        # write device name
        path = os.path.join(self.path, self.NAME_FILE)
        if not os.path.exists(path):
            with open(path, 'w') as f:
                f.write(self.name)
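The version attribute compared against PROFILE_VERSION above is not part of this excerpt; presumably it is a property that reads the version file written at the end of __init__. A sketch under that assumption (not the project's actual code):

    @property
    def version(self):
        # Assumed: return the profile version previously written to disk, or 0 for a fresh profile.
        path = os.path.join(self.path, self.PROFILE_VERSION_FILE)
        try:
            with open(path) as f:
                return int(f.read().strip())
        except (IOError, ValueError):
            return 0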
Example #16
    def save(self, filename='index', metadata=True, resources=True):
        ''' save metadata and content

        filename - defines just filename for three files: 
            - <filename>.html 
            - <filename>.metadata 
            - <filename>.resources

        Only the first one is HTML file, the rest of files are JSON files

        metadata - if True, HTTP response information will be stored into .metadata file
        resources - If True, resources metadata will be stores into .resources file 
        '''
        if not self.path:
            raise RuntimeError('Error! The path for storing content is not defined')

        if not os.path.exists(self.path):
            utils.makedirs(self.path)

        # save content metadata
        if metadata:
            utils.save(os.path.join(self.path, '%s.metadata' % filename), 
                    content=unicode(json.dumps(self.metadata['headers'], indent=4, sort_keys=True)))

        # save resources metadata
        if resources and self.metadata['resources']:
            utils.save(os.path.join(self.path, '%s.resources' % filename), 
                    content=unicode(json.dumps(self.metadata['resources'], indent=4, sort_keys=True)))

        # save content
        offline_content = copy.deepcopy(self.content)
        offline_links = dict([(url, self.metadata['resources'][url]['filename']) for url in self.metadata['resources']])
        offline_content.make_links_offline(offline_links=offline_links)
        offline_content.save(os.path.join(self.path, '%s.html' % filename))
Example #17
    def __init__(self, in_base_path, in_path, out_path):
        self.directory = os.path.basename(in_path)
        self.in_path = in_path
        self.out_path = out_path + "/" + self.directory
        self.small_dirname = "small"
        self.date_str = ""
        utils.makedirs(self.out_path + "/" + self.small_dirname)

        try:
            self.date = time.strptime(self.directory[:10], "%Y-%m-%d")
        except ValueError:
            self.date = None

        self.attachments = []

        for f in os.listdir(in_path):
            f_full = in_path + "/" + f
            if os.path.isfile(f_full):
                if f[-4:] == ".mml":
                    if hasattr(self, "page_path"):
                        utils.fatal("multiple content found in " + in_path)
                    self.page_path = f_full
                    self.processHeader()
                else:
                    self.attachments.append(f)
        self.processAttachments()
Example #18
    def get_resources(self, pattern=None):
        ''' fetch resources (images, css, javascript, video, ...)
        '''
        if not pattern:
            raise RuntimeError('Error! The pattern is not defined')

        pattern = re.compile(pattern)

        if self.cached and self.path:
            cache_dir = os.path.join(self.path, 'files/')
            fetcher = self._get_fetcher(headers=self.headers, cached=True, cache_dir=cache_dir)
        else:
            fetcher = self._get_fetcher(headers=self.headers)

        for link in self.content.links():
            if pattern and pattern.search(link):

                offline_filename = os.path.join(self.path, utils.offline_link(link))
                utils.makedirs(offline_filename)

                response = fetcher.fetch(link, to_file=offline_filename)
                response.pop(u'content')
                url = response.pop(u'url')
                if url is not self.metadata['resources']:
                    self.metadata['resources'][url] = response
                    response['filename'] = response['filename'].replace(self.path, '')
                    self.metadata['resources'][url]['filename'] = response['filename']
Example #19
    def copy_path(self, Path):
        _doRecurse = False
        source = self.project.evaluate_path(Path.source)
        if Path.dest:
            dest = self.project.evaluate_path(Path.dest)
        else:
            # Source must begin with a prefix if we don't have a
            # dest. Skip past the source prefix and replace it with
            # the right bundle path instead.
            p = re.compile("^\${prefix(:.*?)?}/")
            m = p.match(Path.source)
            if m:
                relative_dest = self.project.evaluate_path(Path.source[m.end() :])
                dest = self.project.get_bundle_path("Contents/Resources", relative_dest)
            else:
                print "Invalid bundle file, missing or invalid 'dest' property: " + Path.dest
                sys.exit(1)

        (dest_parent, dest_tail) = os.path.split(dest)
        utils.makedirs(dest_parent)

        # Check that the source only has wildcards in the last component.
        p = re.compile("[\*\?]")
        (source_parent, source_tail) = os.path.split(source)
        if p.search(source_parent):
            print "Can't have wildcards except in the last path component: " + source
            sys.exit(1)

        if p.search(source_tail):
            source_check = source_parent
            if Path.recurse:
                _doRecurse = True
        else:
            source_check = source
        if not os.path.exists(source_check):
            print "Cannot find source to copy: " + source
            sys.exit(1)

        # If the destination has a wildcard as last component (copied
        # from the source in dest-less paths), ignore the tail.
        if p.search(dest_tail):
            dest = dest_parent

        if _doRecurse:
            for root, dirs, files in os.walk(source_parent):
                destdir = os.path.join(dest, os.path.relpath(root, source_parent))
                utils.makedirs(destdir)
                for globbed_source in glob.glob(os.path.join(root, source_tail)):
                    try:
                        # print("Copying %s to %s" % (globbed_source, destdir))
                        shutil.copy(globbed_source, destdir)
                    except EnvironmentError as e:
                        if e.errno == errno.ENOENT:
                            print("Warning, source file missing: " + globbed_source)
                        elif e.errno == errno.EEXIST:
                            print("Warning, path already exists: " + dest)
                        else:
                            print("Error %s when copying file: %s" % (str(e), globbed_source))
                            sys.exit(1)
def run_g2s(a_output_dir, a_filename, a_f107, a_f107a, a_ap):
    """
    $g2smodel_dir/$g2smodel -v -d $output_dir -i $F107 $F107a $Ap $ECMWF_ops/$ENfilename >& $logdir/$g2s_logfile
    """
    conf = Conf.get_instance()

    #get info from conf
    log_dir = conf.get('G2S', 'g2s_log_dir')
    #the_dir    = conf.get('G2S', 'dir')
    exe = conf.get('G2S', 'exe')

    log_file = '%s/g2s_%s.log' % (log_dir, os.path.basename(a_filename))

    print("log_dir = %s\n" % (log_dir))
    print("output_dir = %s\n" % (a_output_dir))

    # makedirs
    makedirs(a_output_dir)
    makedirs(log_dir)

    #check if the bin file already exists. If yes don't do anything
    the_date = get_datetime_from_ecmwf_file_name(a_filename)
    bin_file = 'G2SGCSx%s_HWM07.bin' % (the_date.strftime('%Y%m%d%H'))

    # look for bin_file
    if not os.path.exists('%s/%s' % (a_output_dir, bin_file)):

        # substitute command line
        exe = re.sub(r'\${log_file}', log_file, exe)
        exe = re.sub(r'\${output_dir}', a_output_dir, exe)

        # substitute params
        exe = re.sub(r'\${f107}', a_f107, exe)
        exe = re.sub(r'\${f107a}', a_f107a, exe)
        exe = re.sub(r'\${ap}', a_ap, exe)
        # substitute ecmwf file
        exe = re.sub(r'\${ecmwf_file}', a_filename, exe)

        command = '%s' % (exe)

        print('will run [%s]\n' % (command))

        # call the command
        retcode = call(command, shell=True)

        print("retcode %s\n" % (retcode))

        if retcode != 0:
            raise Exception("Cannot run g2s for %s. Check error in %s" %
                            (a_filename, log_file))

        print("G2S bin file %s/%s generated \n" % (a_output_dir, bin_file))

    else:
        print("G2S bin file %s/%s previously generated \n" % (a_output_dir,
                                                              bin_file))

    return '%s/%s' % (a_output_dir, bin_file)
Example #21
 def extract_symlink(self, arcname):
     link_name = self.get_dst(arcname)
     source = self.z.read(arcname)
     dirn, filename = os.path.split(link_name)
     makedirs(dirn)
     if os.path.exists(link_name):
         os.unlink(link_name)
     os.symlink(source, link_name)
     return link_name
Example #22
def build():
    makedirs(DIR)
    locations = []
    for canonical, region in REGIONS.items():
        location = build_region(canonical, region)
        locations.append(location)
    build_index(sorted(
        locations,
        key = lambda location: location['name']
    ))
Example #23
        def __set__(self, obj, val):
            if not exists(obj.paths.mounts):
                makedirs(obj.paths.mounts)

            path = join(obj.paths.mounts, obj.name)
            if val is None:
                if exists(path):
                    os.remove(path)
            else:
                file(path, "w").write(val + "\n")
Example #24
def create_catalog_at_path(path, id):

    path = canonical_path(path)
    try:
        makedirs(path)
    except OSError as e:
        if e.errno == 17:
            pass # directory already exists
        else:
            raise e
Example #25
    def new_id(self, seed=None):
        def digest(s):
            return hashlib.md5(s.encode("utf-8")).hexdigest()

        id = digest(repr(seed) + repr(time.time()) + repr(random.SystemRandom().getrandbits(128)))
        while self.exists(id):
            id = digest(id)

        makedirs(dirname(self._get_blob_path(id)))
        return id
Example #26
    def create_gdk_pixbuf_loaders_setup(self):
        modulespath = ""
        cachepath = ""
        if os.path.exists(os.path.join(self.project.get_prefix(), "lib", 
                                       "gdk-pixbuf-2.0")):

            modulespath = self.project.get_bundle_path("Contents/Resources/lib/",
                                                     "gdk-pixbuf-2.0", 
                                                     "${pkg:gdk-pixbuf-2.0:gdk_pixbuf_binary_version}",
                                                     "loaders")
            cachepath = self.project.get_bundle_path("Contents/Resources/lib/",
                                                     "gdk-pixbuf-2.0",
                                                     "${pkg:gdk-pixbuf-2.0:gdk_pixbuf_binary_version}",
                                                     "loaders.cache")
        elif os.path.exists(os.path.join(self.project.get_prefix(), "lib", 
                                       "gdk-pixbuf-3.0")):
            modulespath = self.project.get_bundle_path("Contents/Resources/lib/",
                                                     "gdk-pixbuf-3.0", 
                                                     "${pkg:gdk-pixbuf-3.0:gdk_pixbuf_binary_version}",
                                                     "loaders")
            cachepath = self.project.get_bundle_path("Contents/Resources/lib/",
                                                     "gdk-pixbuf-3.0",
                                                     "${pkg:gdk-pixbuf-3.0:gdk_pixbuf_binary_version}",
                                                     "loaders.cache")
        else:
            modulespath = self.project.get_bundle_path("Contents/Resources/lib/",
                                                       self.project.get_gtk_dir(),
                                                       "${pkg:" + self.meta.gtk + ":gtk_binary_version}",
                                                       "loaders")
            cachepath = self.project.get_bundle_path("Contents/Resources/etc/",
                                                     self.project.get_gtk_dir(),
                                                     "gdk-pixbuf.loaders")

        modulespath = utils.evaluate_pkgconfig_variables (modulespath)
        cachepath = utils.evaluate_pkgconfig_variables (cachepath)

        cmd = "GDK_PIXBUF_MODULEDIR=" + modulespath + " gdk-pixbuf-query-loaders"
        f = os.popen(cmd)

        utils.makedirs(os.path.dirname(cachepath))
        fout = open(cachepath, "w")

        prefix = "\"" + self.project.get_bundle_path("Contents/Resources")
        for line in f:
            line = line.strip()
            if line.startswith("#"):
                continue

            # Replace the hardcoded bundle path with @executable_path...
            if line.startswith(prefix):
                line = line[len(prefix):]
                line = "\"@executable_path/../Resources" + line
            fout.write(line)
            fout.write("\n")
        fout.close()
Example #27
 def __init__(self, vars, mode):
     utils.makedirs(self.mntpnt)
     try:
         utils.sysexec('mount -t auto -U %s %s' % (vars['DISCONNECTED_OPERATION'], self.mntpnt))
         # make sure it's not read-only
         f = file('%s/api.cache' % self.mntpnt, 'a')
         f.close()
         file.__init__(self, '%s/api.cache' % self.mntpnt, mode)
     except:
         utils.sysexec_noerr('umount %s' % self.mntpnt)
         raise BootManagerException("Couldn't find API-cache for disconnected operation")
Example #28
    def install_addon(self, path):
        """Installs the given addon or directory of addons in the profile."""
        addons = [path]
        if not path.endswith('.xpi') and not os.path.exists(os.path.join(path, 'install.rdf')):
            addons = [os.path.join(path, x) for x in os.listdir(path)]
           
        for addon in addons:
            tmpdir = None
            if addon.endswith('.xpi'):
                tmpdir = tempfile.mkdtemp(suffix = "." + os.path.split(addon)[-1])
                compressed_file = zipfile.ZipFile(addon, "r")
                for name in compressed_file.namelist():
                    if name.endswith('/'):
                        makedirs(os.path.join(tmpdir, name))
                    else:
                        if not os.path.isdir(os.path.dirname(os.path.join(tmpdir, name))):
                            makedirs(os.path.dirname(os.path.join(tmpdir, name)))
                        data = compressed_file.read(name)
                        f = open(os.path.join(tmpdir, name), 'wb')
                        f.write(data) ; f.close()
                addon = tmpdir

            tree = ElementTree.ElementTree(file=os.path.join(addon, 'install.rdf'))
            # description_element =
            # tree.find('.//{http://www.w3.org/1999/02/22-rdf-syntax-ns#}Description/')

            desc = tree.find('.//{http://www.w3.org/1999/02/22-rdf-syntax-ns#}Description')
            apps = desc.findall('.//{http://www.mozilla.org/2004/em-rdf#}targetApplication')
            for app in apps:
                desc.remove(app)
            if len(desc) and '{http://www.mozilla.org/2004/em-rdf#}id' in desc.attrib:
                addon_id = desc.attrib['{http://www.mozilla.org/2004/em-rdf#}id']
            elif len(desc) and desc.find('.//{http://www.mozilla.org/2004/em-rdf#}id') is not None:
                addon_id = desc.find('.//{http://www.mozilla.org/2004/em-rdf#}id').text
            else:
                about = [e for e in tree.findall(
                            './/{http://www.w3.org/1999/02/22-rdf-syntax-ns#}Description') if
                             e.get('{http://www.w3.org/1999/02/22-rdf-syntax-ns#}about') ==
                             'urn:mozilla:install-manifest'
                        ]


                if len(about) == 0:
                    addon_element = tree.find('.//{http://www.mozilla.org/2004/em-rdf#}id')
                    addon_id = addon_element.text
                else:
                    addon_id = about[0].get('{http://www.mozilla.org/2004/em-rdf#}id')

            addon_path = os.path.join(self.profile, 'extensions', addon_id)
            copytree(addon, addon_path, preserve_symlinks=1)
            self.addons_installed.append(addon_path)
Example #29
    def run(self):
        '''

        :return:
        '''
        #command example
        #rtg-tools-3.8.4-bdba5ea_install/rtg vcfeval --baseline truth.vcf.gz \
        #--calls compare1.vcf.gz -o vcfeval_split_snp -t ref.sdf --output-mode=annotate --sample xx --squash-ploidy --regions ?? \
        cmd = ['java', utils.JAVA_XMX, '-jar', utils.RTGJAR, 'vcfeval',
               '-o', self.prefix, '--baseline',
               self.true_vcf,
               '-t', self.reference,
               ]
        if not self.exclude_filtered:
            cmd.append('--all-records')
        if not self.match_geno:
            cmd.append('--squash-ploidy')
        if self.sample:
            cmd.append('--sample')
            cmd.append(self.sample)
        if self.regions:
            cmd.append('--bed-regions')
            cmd.append(self.regions)
        if self.opts:
            cmd.append(self.opts)
        if len(self.vcfs) != 1:
            raise ValueError('vcfeval only takes 1 prediction VCF and 1 truth VCF: {0}'.format(self.vcfs))
        cmd.append('--calls')
        cmd.append(self.vcfs[0])

        tp = os.path.join(self.prefix, 'tp-baseline.vcf.gz')
        tp_predict = os.path.join(self.prefix, 'tp.vcf.gz')
        fn = os.path.join(self.prefix, 'fn.vcf.gz')
        fp = os.path.join(self.prefix, 'fp.vcf.gz')

        if utils.count_variants(self.true_vcf) == 0 and utils.count_variants(self.vcfs[0]) == 0:
            #both truth and prediction are empty, do nothing
            utils.makedirs([self.prefix])
            shutil.copyfile(self.true_vcf, tp)
            shutil.copyfile(self.true_vcf, fn)
            shutil.copyfile(self.vcfs[0], tp_predict)
            shutil.copyfile(self.vcfs[0], fp)
        else:
            if self.log_to_file:
                with utils.versatile_open(self.log_to_file, 'a') as logout:
                    utils.run_shell_command(cmd, sys.stderr, logout)
            else:
                utils.run_shell_command(cmd, sys.stderr, sys.stderr)
        for i in (tp, tp_predict, fn, fp):
            if not os.path.exists(i):
                raise Exception('{0} was not generated by vcfeval. Please check and rerun.'.format(i))
        self.tp, self.tp_predict, self.fn, self.fp = tp, tp_predict, fn, fp
Example #30
    def __init__(self):
        ant.fs.manager.Application.__init__(self)
        GObject.GObject.__init__(self)

        self.path = os.path.join(GLib.get_user_data_dir(), self.PRODUCT_NAME)
        utils.makedirs(self.path)

        self.status = Garmin.Status.NONE
        self.device = None
        self.funcs = []

        self.loop = None
        self.timeout_source = None
import os

import torch
import torchvision as tv
import numpy as np
from torch.utils.data import DataLoader
from torchvision import models
import torch.nn as nn
from utils import makedirs, tensor2cuda, load_model
from argument import parser
from visualization import VanillaBackprop
import patch_dataset as patd
from model.resnetdsbn import *

args = parser()
img_folder = 'grad_img'
img_folder = os.path.join(img_folder, args.dataset, args.affix)
makedirs(img_folder)
out_num = 1

transform_test = tv.transforms.Compose(
    [tv.transforms.Resize(256),
     tv.transforms.ToTensor()])
te_dataset = patd.PatchDataset(path_to_images=args.data_root,
                               fold='test',
                               transform=tv.transforms.ToTensor())
te_loader = DataLoader(te_dataset, batch_size=1, shuffle=False, num_workers=1)

counter = 0
input_list = []
grad_list = []
label_list = []
for data, label in te_loader:
def populate_relation(result,
                      name,
                      alphas=[0.05],
                      split_detect=True,
                      multiple_test_method='fdr_by'):
    print("Populate relation", name)
    # create save folder
    save_dir = utils.makedirs([config.analysis_dir, name])
    relation_dir = utils.makedirs([save_dir, 'relations'])
    metric_dir = utils.makedirs([save_dir, 'four_metrics'])

    # get other attributes
    attr_mean_acc = Compare(
        result, mean_acc,
        test_acc).compare_result  # attr: dirty_acc, clean_acc
    attr_diff_acc = Compare(result, diff_acc,
                            test_acc).compare_result  # attr: diff_acc
    attr_mean_f1 = Compare(result, mean_f1,
                           test_f1).compare_result  # attr: dirty_f1, clean_f1
    attr_diff_f1 = Compare(result, diff_f1,
                           test_f1).compare_result  # attr: diff_f1
    attr_count = Compare(
        result, direct_count,
        mixed_f1_acc).compare_result  # attr: pos count, neg count, same count

    # run t-test
    t_test_comp = Compare(result, t_test, mixed_f1_acc)
    t_test_comp.save_four_metrics(metric_dir)

    # hypothesis test
    for alpha in alphas:
        # print(alpha)
        # get attribute flag by multiple hypothesis test
        attr_flag = hypothesis_test(t_test_comp.compare_result, alpha,
                                    multiple_test_method)

        # populate relation with all of attributes
        relation = {
            **attr_flag,
            **attr_mean_acc,
            **attr_mean_f1,
            **attr_diff_acc,
            **attr_diff_f1,
            **attr_count
        }

        # split detect
        if split_detect and name != "R3":
            relation = split_clean_method(relation)

        # eliminate redundant attribute for R2 and R3

        if name == "R2":
            redundant_dims = [4] if split_detect else [3]
            relation = elim_redundant_dim(relation, redundant_dims)
        if name == "R3":
            redundant_dims = [2, 3]
            relation = elim_redundant_dim(relation, redundant_dims)

        # convert dict to df
        n_key = len(list(relation.keys())[0])
        relation_df = utils.dict_to_df(relation, list(range(n_key - 1)),
                                       [n_key - 1])

        # save relation to csv and pkl
        relation_csv_dir = utils.makedirs([relation_dir, 'csv'])
        save_path = os.path.join(
            relation_csv_dir,
            '{}_{}.csv'.format(name, "{:.6f}".format(alpha).rstrip('0')))
        relation_df.to_csv(save_path)

        relation_pkl_dir = utils.makedirs([relation_dir, 'pkl'])
        save_path = os.path.join(
            relation_pkl_dir,
            '{}_{}.pkl'.format(name, "{:.6f}".format(alpha).rstrip('0')))
        utils.df_to_pickle(relation_df, save_path)
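Note that in this project utils.makedirs takes a list of path components, creates the joined directory and returns its path (it is used as an expression above). A minimal version under that assumption:

import os

def makedirs(path_components):
    # Join the components, create the directory tree if missing, and return the path.
    path = os.path.join(*path_components)
    if not os.path.exists(path):
        os.makedirs(path)
    return path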
Example #33
            wav_end_idx = None

    wav = wav[wav_start_idx:wav_end_idx]

    if end_of_sentence:
        wav = np.lib.pad(wav, ((0, 20), (0, 0)), 'constant', constant_values=0)
    else:
        wav = np.lib.pad(wav, ((0, 10), (0, 0)), 'constant', constant_values=0)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--load_path', required=True)
    parser.add_argument('--sample_path', default="samples")
    parser.add_argument('--text', required=True)
    parser.add_argument('--num_speakers', default=1, type=int)
    parser.add_argument('--speaker_id', default=0, type=int)
    parser.add_argument('--checkpoint_step', default=None, type=int)
    config = parser.parse_args()

    makedirs(config.sample_path)

    synthesizer = Synthesizer()
    synthesizer.load(config.load_path, config.num_speakers,
                     config.checkpoint_step)

    audio = synthesizer.synthesize(texts=[config.text],
                                   base_path=config.sample_path,
                                   speaker_ids=[config.speaker_id],
                                   attention_trim=False)[0]
Example #34
        # test2017.zip, for those who want to attend the competition.
        # ('http://images.cocodataset.org/zips/test2017.zip',
        #  '4e443f8a2eca6b1dac8a6c57641b67dd40621a49'),
    ]
    makedirs(path)
    for url, checksum in _DOWNLOAD_URLS:
        filename = download(url, path=path, overwrite=overwrite, sha1_hash=checksum)
        # extract
        with zipfile.ZipFile(filename) as zf:
            zf.extractall(path=path)


if __name__ == '__main__':
    args = parse_args()
    path = os.path.expanduser(args.download_dir)
    if not os.path.isdir(path) or not os.path.isdir(os.path.join(path, 'train2017')) \
            or not os.path.isdir(os.path.join(path, 'val2017')) \
            or not os.path.isdir(os.path.join(path, 'annotations')):
        if args.no_download:
            raise ValueError(('{} is not a valid directory, make sure it is present.'
                              ' Or you should not disable "--no-download" to grab it'.format(path)))
        else:
            download_coco(path, overwrite=args.overwrite)

    # make symlink
    makedirs(os.path.expanduser('~/.torch/datasets'))
    if os.path.isdir(_TARGET_DIR):
        os.remove(_TARGET_DIR)
    os.symlink(path, _TARGET_DIR)
    try_import_pycocotools()
def output_visuals(batch_data, outputs, args):

    mag_mix = batch_data['mag_mix']
    phase_mix = batch_data['phase_mix']
    features = batch_data['feats']
    coords = batch_data['coords']
    infos = batch_data['infos']

    pred_masks_ = outputs['pred_masks']
    gt_masks_ = outputs['gt_masks']
    mag_mix_ = outputs['mag_mix']
    weight_ = outputs['weight']

    # unwarp log scale
    N = args.num_mix
    FN = args.num_frames
    B = mag_mix.size(0)
    pred_masks_linear = [None for n in range(N)]
    gt_masks_linear = [None for n in range(N)]
    for n in range(N):
        if args.log_freq:
            grid_unwarp = torch.from_numpy(
                warpgrid(B,
                         args.stft_frame // 2 + 1,
                         gt_masks_[0].size(3),
                         warp=False)).to(args.device)
            pred_masks_linear[n] = F.grid_sample(pred_masks_[n], grid_unwarp)
            gt_masks_linear[n] = F.grid_sample(gt_masks_[n], grid_unwarp)
        else:
            pred_masks_linear[n] = pred_masks_[n]
            gt_masks_linear[n] = gt_masks_[n]

    # convert into numpy
    mag_mix = mag_mix.numpy()
    mag_mix_ = mag_mix_.detach().cpu().numpy()
    phase_mix = phase_mix.numpy()
    weight_ = weight_.detach().cpu().numpy()
    for n in range(N):
        pred_masks_[n] = pred_masks_[n].detach().cpu().numpy()
        pred_masks_linear[n] = pred_masks_linear[n].detach().cpu().numpy()
        gt_masks_[n] = gt_masks_[n].detach().cpu().numpy()
        gt_masks_linear[n] = gt_masks_linear[n].detach().cpu().numpy()

        # threshold if binary mask
        if args.binary_mask:
            pred_masks_[n] = (pred_masks_[n] > args.mask_thres).astype(
                np.float32)
            pred_masks_linear[n] = (pred_masks_linear[n] >
                                    args.mask_thres).astype(np.float32)

    # loop over each sample
    for j in range(B):

        # video names
        prefix = []
        for n in range(N):
            prefix.append('-'.join(
                infos[n][0][j].split('/')[-2:]).split('.')[0])
        prefix = '+'.join(prefix)
        makedirs(os.path.join(args.vis, prefix))

        # save mixture
        mix_wav = istft_reconstruction(mag_mix[j, 0],
                                       phase_mix[j, 0],
                                       hop_length=args.stft_hop)
        mix_amp = magnitude2heatmap(mag_mix_[j, 0])
        weight = magnitude2heatmap(weight_[j, 0], log=False, scale=100.)
        filename_mixwav = os.path.join(prefix, 'mix.wav')
        filename_mixmag = os.path.join(prefix, 'mix.jpg')
        filename_weight = os.path.join(prefix, 'weight.jpg')
        imageio.imwrite(os.path.join(args.vis, filename_mixmag),
                        mix_amp[::-1, :, :])
        imageio.imwrite(os.path.join(args.vis, filename_weight),
                        weight[::-1, :])
        wavfile.write(os.path.join(args.vis, filename_mixwav), args.audRate,
                      mix_wav)

        # save each component
        preds_wav = [None for n in range(N)]
        for n in range(N):
            # GT and predicted audio recovery
            gt_mag = mag_mix[j, 0] * gt_masks_linear[n][j, 0]
            gt_wav = istft_reconstruction(gt_mag,
                                          phase_mix[j, 0],
                                          hop_length=args.stft_hop)
            pred_mag = mag_mix[j, 0] * pred_masks_linear[n][j, 0]
            preds_wav[n] = istft_reconstruction(pred_mag,
                                                phase_mix[j, 0],
                                                hop_length=args.stft_hop)

            # output masks
            filename_gtmask = os.path.join(prefix,
                                           'gtmask{}.jpg'.format(n + 1))
            filename_predmask = os.path.join(prefix,
                                             'predmask{}.jpg'.format(n + 1))
            gt_mask = (np.clip(gt_masks_[n][j, 0], 0, 1) * 255).astype(
                np.uint8)
            pred_mask = (np.clip(pred_masks_[n][j, 0], 0, 1) * 255).astype(
                np.uint8)
            imageio.imwrite(os.path.join(args.vis, filename_gtmask),
                            gt_mask[::-1, :])
            imageio.imwrite(os.path.join(args.vis, filename_predmask),
                            pred_mask[::-1, :])

            # output spectrogram (log of magnitude, show colormap)
            filename_gtmag = os.path.join(prefix, 'gtamp{}.jpg'.format(n + 1))
            filename_predmag = os.path.join(prefix,
                                            'predamp{}.jpg'.format(n + 1))
            gt_mag = magnitude2heatmap(gt_mag)
            pred_mag = magnitude2heatmap(pred_mag)
            imageio.imwrite(os.path.join(args.vis, filename_gtmag),
                            gt_mag[::-1, :, :])
            imageio.imwrite(os.path.join(args.vis, filename_predmag),
                            pred_mag[::-1, :, :])

            # output audio
            filename_gtwav = os.path.join(prefix, 'gt{}.wav'.format(n + 1))
            filename_predwav = os.path.join(prefix, 'pred{}.wav'.format(n + 1))
            wavfile.write(os.path.join(args.vis, filename_gtwav), args.audRate,
                          gt_wav)
            wavfile.write(os.path.join(args.vis, filename_predwav),
                          args.audRate, preds_wav[n])

            #output pointclouds
            for f in range(FN):
                idx = torch.where(coords[n][f][:, 0] == j)
                path_point = os.path.join(
                    args.vis, prefix,
                    'point{}_frame{}.ply'.format(n + 1, f + 1))
                if args.rgbs_feature:
                    colors = np.asarray(features[n][f][idx])
                    xyz = np.asarray(coords[n][f][idx][:, 1:4])
                    xyz = xyz * args.voxel_size
                    save_points(path_point, xyz, colors)
                else:
                    xyz = np.asarray(features[n][f][idx])
                    save_points(path_point, xyz)
def run(args):
    # setting the GPU #
    os.environ['CUDA_DEVICE_ORDER'] = "PCI_BUS_ID"
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    logger.info('Read data:')
    train_A, train_B, test_A, test_B = get_data(args.task, args.image_size)

    logger.info('Build graph:')
    model = BicycleGAN(args)

    variables_to_save = tf.global_variables()
    init_op = tf.variables_initializer(variables_to_save)
    init_all_op = tf.global_variables_initializer()
    saver = FastSaver(variables_to_save)

    logger.info('Trainable vars:')
    var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                 tf.get_variable_scope().name)
    for v in var_list:
        logger.info('  %s %s', v.name, v.get_shape())

    if args.load_model != '':
        model_name = args.load_model
    else:
        model_name = '{}_{}'.format(
            args.task,
            datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
    logdir = './logs'
    makedirs(logdir)
    logdir = os.path.join(logdir, model_name)
    logger.info('Events directory: %s', logdir)
    summary_writer = tf.summary.FileWriter(logdir)

    makedirs('./results')

    def init_fn(sess):
        logger.info('Initializing all parameters.')
        sess.run(init_all_op)

    sv = tf.train.Supervisor(
        is_chief=True,
        logdir=logdir,
        saver=saver,
        summary_op=None,
        init_op=init_op,
        init_fn=init_fn,
        summary_writer=summary_writer,
        ready_op=tf.report_uninitialized_variables(variables_to_save),
        global_step=model.global_step,
        save_model_secs=300,
        save_summaries_secs=30)

    if args.train:
        logger.info("Starting training session.")
        with sv.managed_session() as sess:
            model.train(sess, summary_writer, train_A, train_B)

    logger.info("Starting testing session.")
    with sv.managed_session() as sess:
        base_dir = os.path.join('results', model_name)
        makedirs(base_dir)
        model.test(sess, test_A, test_B, base_dir)
    def run_experiment(self):
        """
        Runs a full training experiment with evaluations of the model on the val set at every epoch.
        """
        if self.args.mode == 'test':
            print('Start testing')
            num_test_tasks = self.data.dataset.data_length['test']
            with tqdm(total=int(num_test_tasks /
                                self.args.test_batch_size)) as pbar_test:
                for _, test_sample in enumerate(
                        self.data.get_test_batches(
                            total_batches=int(num_test_tasks /
                                              self.args.test_batch_size))):
                    outputs = self.test_iteration(test_sample=test_sample,
                                                  pbar_test=pbar_test,
                                                  phase='test')
                    batch_size = test_sample[0][0].shape[0]

                    for k in range(batch_size):
                        imgpath1 = test_sample[1]['imgpaths'][1][k]
                        imgpath2 = test_sample[1]['imgpaths'][2][k]
                        filename1 = imgpath1.split('/')[-1]
                        filename2 = imgpath2.split('/')[-1]
                        float_ind1 = float(filename1.split('_')[-1][:-4])
                        float_ind2 = float(filename2.split('_')[-1][:-4])
                        if float_ind2 == 0:
                            float_ind2 = 1.0
                        im_path = os.path.join(
                            self.args.data_root, '%s_%.06f.%s' %
                            (filename1.split('_')[0],
                             (float_ind1 + float_ind2) / 2, self.args.img_fmt))

                        utils.save_image(outputs[k], im_path)

            print('Test finished.')
            return

        elif self.args.mode == 'val':
            print('Validation only')
            total_losses = dict()
            val_losses = dict()
            metrics_accumulator = {
                'psnr': utils.AverageMeter(),
                'ssim': utils.AverageMeter()
            }
            num_evaluation_tasks = self.data.dataset.data_length['val']
            with tqdm(total=int(num_evaluation_tasks /
                                self.args.val_batch_size)) as pbar_val:
                for _, val_sample in enumerate(
                        self.data.get_val_batches(
                            total_batches=int(num_evaluation_tasks /
                                              self.args.val_batch_size))):
                    val_losses, outputs, metrics = self.evaluation_iteration(
                        val_sample=val_sample,
                        total_losses=total_losses,
                        pbar_val=pbar_val,
                        phase='val')
                    batch_size = val_sample[0][0].shape[0]
                    for k, v in metrics.items():
                        metrics_accumulator[k].update(v.avg, n=v.count)

                    for k in range(batch_size):
                        paths = val_sample[1]['imgpaths'][3][k].split('/')
                        save_dir = os.path.join('checkpoint',
                                                self.args.exp_name,
                                                self.args.dataset, paths[-3],
                                                paths[-2])
                        if not os.path.exists(save_dir):
                            utils.makedirs(save_dir)
                        im_path = os.path.join(
                            save_dir,
                            paths[-1])  # 'im4.png' for VimeoSeptuplet

                        utils.save_image(outputs[0][k], im_path)
                    del val_losses, outputs, metrics

            print("%d examples processed" % metrics_accumulator['psnr'].count)
            print("PSNR: %.2f,  SSIM: %.4f\n" %
                  (metrics_accumulator['psnr'].avg,
                   metrics_accumulator['ssim'].avg))
            return

        with tqdm(initial=self.state['current_iter'],
                  total=int(self.args.total_iter_per_epoch *
                            self.args.max_epoch)) as pbar_train:

            # training main loop
            while (self.state['current_iter'] <
                   (self.args.max_epoch * self.args.total_iter_per_epoch)):

                for train_sample_idx, train_sample in enumerate(
                        self.data.get_train_batches(
                            total_batches=int(self.args.total_iter_per_epoch *
                                              self.args.max_epoch) -
                            self.state['current_iter'])):

                    train_losses, outputs, metrics, self.state[
                        'current_iter'] = self.train_iteration(
                            train_sample=train_sample,
                            total_losses=self.total_losses,
                            epoch_idx=(self.state['current_iter'] /
                                       self.args.total_iter_per_epoch),
                            pbar_train=pbar_train,
                            current_iter=self.state['current_iter'],
                            sample_idx=self.state['current_iter'],
                            do_evaluation=(self.state['current_iter'] %
                                           self.args.eval_iter == 0))

                    # Log to Tensorboard
                    if self.state[
                            'current_iter'] % self.args.log_iter == 1 and self.use_tensorboard:
                        utils.log_tensorboard(
                            self.writer,
                            train_losses,
                            metrics['psnr'].avg,
                            metrics['ssim'].avg,
                            None,
                            self.model.optimizer.param_groups[0]['lr'],
                            self.state['current_iter'],
                            mode='train')

                    # validation
                    if self.state[
                            'current_iter'] % self.args.total_iter_per_epoch == 0:

                        total_losses = dict()
                        val_losses = {}
                        metrics_accumulator = {
                            'psnr': utils.AverageMeter(),
                            'ssim': utils.AverageMeter()
                        }
                        num_evaluation_tasks = self.data.dataset.data_length[
                            'val']
                        with tqdm(total=int(num_evaluation_tasks /
                                            self.args.val_batch_size +
                                            0.99)) as pbar_val:
                            for _, val_sample in enumerate(
                                    self.data.get_val_batches(
                                        total_batches=int(
                                            num_evaluation_tasks /
                                            self.args.val_batch_size + 0.99))):
                                val_loss, outputs, metrics = self.evaluation_iteration(
                                    val_sample=val_sample,
                                    total_losses=total_losses,
                                    pbar_val=pbar_val,
                                    phase='val')
                                for k, v in metrics.items():
                                    metrics_accumulator[k].update(v.avg,
                                                                  n=v.count)
                                for k, v in val_loss.items():
                                    if k not in val_losses.keys():
                                        val_losses[k] = utils.AverageMeter()
                                    val_losses[k].update(v)

                                del val_loss, outputs, metrics

                            for k, v in val_losses.items():
                                val_losses[k] = v.avg
                            if val_losses["total"] < self.state[
                                    'best_val_loss']:
                                print("Best validation loss",
                                      val_losses["total"])
                                self.state['best_val_loss'] = val_losses[
                                    "total"]
                                self.state['best_val_iter'] = self.state[
                                    'current_iter']
                                self.state['best_epoch'] = int(
                                    self.state['best_val_iter'] /
                                    self.args.total_iter_per_epoch)
                        print("validation PSNR: %.2f,  SSIM: %.4f\n" %
                              (metrics_accumulator['psnr'].avg,
                               metrics_accumulator['ssim'].avg))

                        # log to TensorBoard
                        if self.use_tensorboard:
                            utils.log_tensorboard(
                                self.writer,
                                val_losses,
                                metrics_accumulator['psnr'].avg,
                                metrics_accumulator['ssim'].avg,
                                None,
                                self.model.optimizer.param_groups[0]['lr'],
                                self.state['current_iter'],
                                mode='val')

                        self.epoch += 1

                        PSNR = metrics_accumulator['psnr'].avg
                        is_best = PSNR > self.best_PSNR
                        self.best_PSNR = max(PSNR, self.best_PSNR)
                        utils.save_checkpoint(
                            {
                                'epoch': self.epoch,
                                'arch': self.args,
                                'state_dict': self.model.state_dict(),
                                # 'optimizer': optimizer.state_dict(),
                                'best_PSNR': self.best_PSNR
                            },
                            is_best,
                            self.args.exp_name)

                        self.model.scheduler.step(val_losses['total'])
                        self.total_losses = dict()
                        self.epochs_done_in_this_run += 1
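
The call to utils.save_checkpoint above is project-specific and not reproduced on this page. As a rough sketch only, assuming a hypothetical signature save_checkpoint(state, is_best, exp_name) that writes the latest weights and keeps an extra copy when the model improves, it could look like:

import os
import shutil

import torch


def save_checkpoint(state, is_best, exp_name, ckpt_dir="checkpoints"):
    """Save the latest checkpoint and keep a copy of the best one (assumed layout)."""
    exp_dir = os.path.join(ckpt_dir, exp_name)
    os.makedirs(exp_dir, exist_ok=True)
    latest_path = os.path.join(exp_dir, "checkpoint_latest.pth")
    torch.save(state, latest_path)
    if is_best:
        # Keep the best-performing weights under a separate, stable name.
        shutil.copyfile(latest_path, os.path.join(exp_dir, "checkpoint_best.pth"))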
Exemple #38
0
                                metavar="DGV_FILE",
                                help="DGV file containing structural variants",
                                required=False)
    rand_dgv_group.add_argument(
        "--sv_prop_het",
        metavar="FLOAT",
        help="Proportion of heterozygous structural variants",
        default=0.6,
        type=float)

    args = main_parser.parse_args()
    args.java = utils.get_java(args.java)
    check_java(args.java)

    utils.JAVA_XMX = utils.JAVA_XMX + args.java_max_mem
    makedirs([args.out_dir])

    # Setup logging
    FORMAT = '%(levelname)s %(asctime)-15s %(name)-20s %(message)s'
    loglevel = get_loglevel(args.loglevel)
    if not args.log_to_stderr:
        logging.basicConfig(filename=os.path.join(args.out_dir, "varsim.log"),
                            filemode="w",
                            level=loglevel,
                            format=FORMAT)
    else:
        logging.basicConfig(level=loglevel, format=FORMAT)

    simulator = None if args.disable_sim else args.simulator
    simulator_opts = args.simulator_options
    check_simulator_opts(simulator, simulator_opts)
Exemple #39
0
        if args.binary_mask:
            assert args.loss == 'bce', 'Binary Mask should go with BCE loss'
            args.id += '-binary'
        else:
            args.id += '-ratio'
        if args.weighted_loss:
            args.id += '-weightedLoss'
        args.id += '-channels{}'.format(args.num_channels)
        args.id += '-epoch{}'.format(args.num_epoch)
        args.id += '-step' + '_'.join([str(x) for x in args.lr_steps])

    print('Model ID: {}'.format(args.id))

    # paths to save/load output
    args.ckpt = os.path.join(args.ckpt, args.id)
    args.vis = os.path.join(args.ckpt, 'visualization/')
    if args.mode == 'train':
        makedirs(args.ckpt, remove=True)
    elif args.mode == 'eval':
        args.weights_sound = os.path.join(args.ckpt, 'sound_best.pth')
        args.weights_frame = os.path.join(args.ckpt, 'frame_best.pth')
        args.weights_synthesizer = os.path.join(args.ckpt,
                                                'synthesizer_best.pth')

    # initialize best error with a big number
    args.best_err = float("inf")

    random.seed(args.seed)
    torch.manual_seed(args.seed)
    main(args)
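
Several of these examples call makedirs with a single path, with a list of paths, or with remove=True (as in the train branch above), but the project helpers themselves are not shown here. A minimal sketch under those assumptions:

import os
import shutil


def makedirs(paths, remove=False):
    # Accept a single path or a list of paths (assumed behavior of the helpers above).
    if isinstance(paths, str):
        paths = [paths]
    for path in paths:
        if remove and os.path.isdir(path):
            shutil.rmtree(path)  # start from an empty directory when asked to
        if not os.path.isdir(path):
            os.makedirs(path)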
Exemple #40
0
def generate_audio_response(textList, speaker_id, alarm_id):
    #global global_config
    #model_name = os.path.basename(global_config.load_path)
    #iskorean=global_config.is_korean
    audio_clear()

    global member_id, method_id
    if member_id != speaker_id:
        
        if speaker_id == 0:
            if not (member_id==0):
                if member_id != -1:
                    synthesizer.close()
                synthesizer.load('logs/backup_log/son+yuinna', 2)
        elif speaker_id == 3:
            if not (member_id==3):
                if member_id != -1:
                    synthesizer.close()
                synthesizer.load('logs/backup_log/new_inna+kss+leejh+nandong2',4)
        else:
            if not (member_id==1 or member_id==2 or member_id==4):
                if member_id != -1:
                    synthesizer.close()
                synthesizer.load('logs/backup_log/new_inna+kss+leejh', 3)
       
        member_id = speaker_id      

    if speaker_id==0:
        model_name='손석희'
        #speaker_id=0        
    elif speaker_id==1:
        model_name='유인나'
        speaker_id=0
    elif speaker_id==2:
        model_name='코퍼스'  # Korean corpus
        speaker_id=1
    elif speaker_id==3:
        model_name='김난희'
        #speaker_id=3
    else:
        model_name='이주형'
        speaker_id=2

    ###########################################################################################
    # This block runs in a loop over the input texts

    textcnt = 0  # tracks which text is being processed
    audio_list = []  # for checking the generated audio files
    print(textList)
    for text in textList:
        # hashed_text = hashlib.md5(text.encode('utf-8')).hexdigest()  # hash of the text
        hashed_text = "{}".format(str(textcnt))

        # looped for each text
        # this part builds the output path
        relative_dir_path = os.path.join(AUDIO_DIR, model_name)
        relative_audio_path = os.path.join(
                relative_dir_path, "{}.{}.wav".format(hashed_text, speaker_id))
        real_path = os.path.join(ROOT_PATH, relative_audio_path)
        
        makedirs(os.path.dirname(real_path))
        
        if not os.path.exists(add_postfix(real_path, 0)):
            try:
                # audio is the output file name
                audio = synthesizer.synthesize(
                        [text], paths=[real_path], speaker_ids=[speaker_id],
                        attention_trim=True)[0]
                audio_list.append(audio)
            except:
                return jsonify(success=False), 400
            
        
        textcnt +=1
    
    ###########################################################################################

    # Combine the audio clips
    # The combined file is named 'output.wav'
    CUR_PATH = os.getcwd()
    #print(CUR_PATH)  # for checking the audio path
    FILE_PATH = os.path.join(AUDIO_PATH, model_name)
    #print(FILE_PATH)  # for checking the audio path
    print("Running method {}".format(method_id))
    alarm_type = 0
    alarm_id -= 1

    if (method_id == 1) or (method_id == 2):  # basic
        combine_audio(os.path.join(CUR_PATH, FILE_PATH))
    elif method_id == 3:  # morning_call
        combine_audio(os.path.join(CUR_PATH, FILE_PATH))  # web\audio\model_name\output.wav
        if alarm_id == 0 or alarm_id == 1 or alarm_id == 2 or alarm_id == 3:
            alarm_type = 0
        else:
            alarm_id = (alarm_id - 4)
            alarm_type = 1
        create_alarm(alarm_id, model_name, alarm_type) # bgm_select, model_name, type
    elif method_id == 4:  # briefing
        combine_audio(os.path.join(CUR_PATH, FILE_PATH))  # web\audio\model_name\output.wav
        create_briefing(alarm_id, model_name) # bgm_select, model_name, #0 1 2 3
    elif method_id == 5:  # birthday
        combine_audio(os.path.join(CUR_PATH, FILE_PATH))  # web\audio\model_name\output.wav
        create_birthday(0, model_name) # bgm_select, model_name, #0 1 2 3

    #print(os.path.join(CUR_PATH, FILE_PATH))
    #print(TEST_PATH)

    ###########################################################################################

    return send_file(
        os.path.join('audio', model_name, 'output.wav'),
        mimetype="audio/wav",
        as_attachment=True,
        attachment_filename=hashed_text + ".wav")

    ###########################################################################################

    # Load the combined file and return it as audio
    response = make_response(os.path.join('web', 'audio', model_name, 'output.wav'))
    response.headers['Content-Type'] = 'audio/wav'
    response.headers['Content-Disposition'] = 'attachment; filename=sound.wav'
    return response
Exemple #41
0
    parser.add_argument('--load_path', required=True)
    parser.add_argument('--sample_dir_name', default="samples")
    parser.add_argument('--text', required=True)
    parser.add_argument('--num_speakers', default=1, type=int)
    parser.add_argument('--speaker_id', default=0, type=int)
    parser.add_argument('--checkpoint_step', default=None, type=int)
    parser.add_argument('--is_korean', default=True, type=str2bool)

    # Added parts
    # add for manipulating attention module result
    parser.add_argument('--manual_attention_mode', default=0, type=int, choices=[0, 1, 2, 3])
    # parser.add_argument('--get_base_alignment_path', default=None, type=int, choices=[1])

    config = parser.parse_args()

    makedirs(config.sample_dir_name)

    synthesizer = Synthesizer()
    synthesizer.load(config.load_path, config.num_speakers, config.checkpoint_step)


    # if config.get_base_alignment_path is not None:
    #     base_alignment_path = os.path.abspath('')
    # else:
    #     base_alignment_path = config.get_base_alignment_path

    if config.manual_attention_mode > 0:
        base_alignment_path = os.path.abspath('')
    else:
        base_alignment_path = None
Exemple #42
0
import sys

sys.path.append('..')

import os

import report
import merger
import utils

tag = "1_30_0"

utils.makedirs("results")

all_xml_file = "results/all.xml"
all_xml_writer = open(all_xml_file, "w")
merger.merge_test_runs(".", tag, all_xml_writer, 1)
all_xml_writer.close()

report.make_result_pages(
    test_results_file=os.path.abspath(all_xml_file),
    expected_results_file="",
    failures_markup_file=os.path.abspath("explicit-failures-markup.xml"),
    tag=tag,
    run_date="Today date",
    comment_file=os.path.abspath("comment.html"),
    results_dir="results",
    result_prefix="",
    reports=["x", "ds", "dd"],
    v2=1)
Exemple #43
0
def output_visuals(vis_rows, batch_data, outputs, args):
    # fetch data and predictions
    mag_mix = batch_data['mag_mix']
    phase_mix = batch_data['phase_mix']
    frames = batch_data['frames']
    infos = batch_data['infos']

    pred_masks_ = outputs['pred_masks']
    gt_masks_ = outputs['gt_masks']
    mag_mix_ = outputs['mag_mix']
    weight_ = outputs['weight']

    # unwarp log scale
    N = args.num_mix
    B = mag_mix.size(0)
    pred_masks_linear = [None for n in range(N)]
    gt_masks_linear = [None for n in range(N)]
    for n in range(N):
        if args.log_freq:
            grid_unwarp = torch.from_numpy(
                warpgrid(B,
                         args.stft_frame // 2 + 1,
                         gt_masks_[0].size(3),
                         warp=False)).to(args.device)
            pred_masks_linear[n] = F.grid_sample(pred_masks_[n], grid_unwarp)
            gt_masks_linear[n] = F.grid_sample(gt_masks_[n], grid_unwarp)
        else:
            pred_masks_linear[n] = pred_masks_[n]
            gt_masks_linear[n] = gt_masks_[n]

    # convert into numpy
    mag_mix = mag_mix.numpy()
    mag_mix_ = mag_mix_.detach().cpu().numpy()
    phase_mix = phase_mix.numpy()
    weight_ = weight_.detach().cpu().numpy()
    for n in range(N):
        pred_masks_[n] = pred_masks_[n].detach().cpu().numpy()
        pred_masks_linear[n] = pred_masks_linear[n].detach().cpu().numpy()
        gt_masks_[n] = gt_masks_[n].detach().cpu().numpy()
        gt_masks_linear[n] = gt_masks_linear[n].detach().cpu().numpy()

        # threshold if binary mask
        if args.binary_mask:
            pred_masks_[n] = (pred_masks_[n] > args.mask_thres).astype(
                np.float32)
            pred_masks_linear[n] = (pred_masks_linear[n] >
                                    args.mask_thres).astype(np.float32)

    # loop over each sample
    for j in range(B):
        row_elements = []

        # video names
        prefix = []
        for n in range(N):
            prefix.append('-'.join(
                infos[n][0][j].split('/')[-2:]).split('.')[0])
        prefix = '+'.join(prefix)
        makedirs(os.path.join(args.vis, prefix))

        # save mixture
        mix_wav = istft_reconstruction(mag_mix[j, 0],
                                       phase_mix[j, 0],
                                       hop_length=args.stft_hop)
        mix_amp = magnitude2heatmap(mag_mix_[j, 0])
        weight = magnitude2heatmap(weight_[j, 0], log=False, scale=100.)
        filename_mixwav = os.path.join(prefix, 'mix.wav')
        filename_mixmag = os.path.join(prefix, 'mix.jpg')
        filename_weight = os.path.join(prefix, 'weight.jpg')
        imageio.imwrite(os.path.join(args.vis, filename_mixmag),
                        mix_amp[::-1, :, :])
        imageio.imwrite(os.path.join(args.vis, filename_weight),
                        weight[::-1, :])
        wavfile.write(os.path.join(args.vis, filename_mixwav), args.audRate,
                      mix_wav)
        row_elements += [{
            'text': prefix
        }, {
            'image': filename_mixmag,
            'audio': filename_mixwav
        }]

        # save each component
        preds_wav = [None for n in range(N)]
        for n in range(N):
            # GT and predicted audio recovery
            gt_mag = mag_mix[j, 0] * gt_masks_linear[n][j, 0]
            gt_wav = istft_reconstruction(gt_mag,
                                          phase_mix[j, 0],
                                          hop_length=args.stft_hop)
            pred_mag = mag_mix[j, 0] * pred_masks_linear[n][j, 0]
            preds_wav[n] = istft_reconstruction(pred_mag,
                                                phase_mix[j, 0],
                                                hop_length=args.stft_hop)

            # output masks
            filename_gtmask = os.path.join(prefix,
                                           'gtmask{}.jpg'.format(n + 1))
            filename_predmask = os.path.join(prefix,
                                             'predmask{}.jpg'.format(n + 1))
            gt_mask = (np.clip(gt_masks_[n][j, 0], 0, 1) * 255).astype(
                np.uint8)
            pred_mask = (np.clip(pred_masks_[n][j, 0], 0, 1) * 255).astype(
                np.uint8)
            imageio.imwrite(os.path.join(args.vis, filename_gtmask),
                            gt_mask[::-1, :])
            imageio.imwrite(os.path.join(args.vis, filename_predmask),
                            pred_mask[::-1, :])

            # output spectrogram (log of magnitude, show colormap)
            filename_gtmag = os.path.join(prefix, 'gtamp{}.jpg'.format(n + 1))
            filename_predmag = os.path.join(prefix,
                                            'predamp{}.jpg'.format(n + 1))
            gt_mag = magnitude2heatmap(gt_mag)
            pred_mag = magnitude2heatmap(pred_mag)
            imageio.imwrite(os.path.join(args.vis, filename_gtmag),
                            gt_mag[::-1, :, :])
            imageio.imwrite(os.path.join(args.vis, filename_predmag),
                            pred_mag[::-1, :, :])

            # output audio
            filename_gtwav = os.path.join(prefix, 'gt{}.wav'.format(n + 1))
            filename_predwav = os.path.join(prefix, 'pred{}.wav'.format(n + 1))
            wavfile.write(os.path.join(args.vis, filename_gtwav), args.audRate,
                          gt_wav)
            wavfile.write(os.path.join(args.vis, filename_predwav),
                          args.audRate, preds_wav[n])

            # output video
            frames_tensor = [
                recover_rgb(frames[n][j, :, t]) for t in range(args.num_frames)
            ]
            frames_tensor = np.asarray(frames_tensor)
            path_video = os.path.join(args.vis, prefix,
                                      'video{}.mp4'.format(n + 1))
            save_video(path_video,
                       frames_tensor,
                       fps=args.frameRate / args.stride_frames)

            # combine gt video and audio
            filename_av = os.path.join(prefix, 'av{}.mp4'.format(n + 1))
            combine_video_audio(path_video,
                                os.path.join(args.vis, filename_gtwav),
                                os.path.join(args.vis, filename_av))

            row_elements += [{
                'video': filename_av
            }, {
                'image': filename_predmag,
                'audio': filename_predwav
            }, {
                'image': filename_gtmag,
                'audio': filename_gtwav
            }, {
                'image': filename_predmask
            }, {
                'image': filename_gtmask
            }]

        row_elements += [{'image': filename_weight}]
        vis_rows.append(row_elements)
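
The istft_reconstruction helper used above is not included in this excerpt. A minimal sketch, assuming librosa is available and the helper simply recombines magnitude and phase before an inverse STFT:

import librosa
import numpy as np


def istft_reconstruction(mag, phase, hop_length=256):
    # Rebuild the complex spectrogram from magnitude and phase, then invert it.
    spec = mag.astype(np.complex64) * np.exp(1j * phase)
    wav = librosa.istft(spec, hop_length=hop_length)
    return np.clip(wav, -1.0, 1.0)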
Exemple #44
0
gpus = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_virtual_device_configuration(
    gpus[0],
    [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=256)])

parser = argparse.ArgumentParser()
parser.add_argument('--dataset_size', type=int, default=10)
parser.add_argument('--lr', type=float, default=3e-2)
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--batch_time', type=int, default=16)
args = parser.parse_args()

PLOT_DIR = 'plots/mass_spring_damper/lstm/'
TIME_OF_RUN = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")

makedirs(PLOT_DIR)

if not os.path.isfile('experiments/datasets/mass_spring_damper_x_train.npy'):
    x_train, _, x_val, _ = create_dataset()
x_train, _, x_val, _ = load_dataset()
# Offset input/output by one timestep
x_val_ref = x_val
y_train = x_train[:, 1:]
x_train = x_train[:, :-1]
y_val = x_val[:, 1:]
x_val = x_val[:, :-1]


class TrainDatagen(tf.keras.utils.Sequence):
    def __len__(self):
        return x_train.shape[0] * x_train.shape[1] // args.batch_size
Exemple #45
0
    except OSError as e:
        pass

    # NOTE: added /sys and /dev b/c some nodes fail due to this when disk is
    # nearly full.
    utils.sysexec_noerr("umount {}/proc".format(SYSIMG_PATH), log)
    utils.sysexec_noerr("umount {}/mnt/cdrom".format(SYSIMG_PATH), log)
    utils.sysexec_noerr("umount {}/vservers".format(SYSIMG_PATH), log)
    utils.sysexec_noerr("umount {}/sys".format(SYSIMG_PATH), log)
    utils.sysexec_noerr("umount {}/dev".format(SYSIMG_PATH), log)
    utils.sysexec_noerr("umount {}".format(SYSIMG_PATH), log)
    vars['ROOT_MOUNTED'] = 0

    #    log.write( "Removing any old files, directories\n" )
    #    utils.removedir(TEMP_PATH)

    log.write("Cleaning up any existing PlanetLab config files\n")
    try:
        flist = os.listdir(PLCONF_DIR)
        for file in flist:
            utils.removedir(file)
    except OSError:
        pass

    # create the temp path and sysimg path. since sysimg
    # path is in temp path, both are created here
    log.write("Creating system image path\n")
    utils.makedirs(SYSIMG_PATH)

    return 1
Exemple #46
0
import torch.nn.functional as F
import torch.optim as optim
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report

from utils import load_data, accuracy, dense_tensor_to_sparse, resample, makedirs
from utils_inductive import transform_dataset_by_idx
from models import HGAT
import os, gc, sys
from print_log import Logger

logdir = "log/"
savedir = 'model/'
embdir = 'embeddings/'
makedirs([logdir, savedir, embdir])

os.environ["CUDA_VISIBLE_DEVICES"] = "2"

write_embeddings = True
HOP = 2

dataset = 'example'

LR = 0.01 if dataset == 'snippets' else 0.005
DP = 0.95 if dataset in ['agnews', 'tagmynews'] else 0.8
WD = 0 if dataset == 'snippets' else 5e-6
LR = 0.05 if 'multi' in dataset else LR
DP = 0.5 if 'multi' in dataset else DP
WD = 0 if 'multi' in dataset else WD
Exemple #47
0
def Run(vars, upgrade, log):
    """
    Download core + extensions bootstrapfs tarballs and install on the hard drive

    the upgrade boolean is True when we are upgrading a node root install while 
    preserving its slice contents; in that case we just perform extra cleanup
    before unwrapping the bootstrapfs
    this is because the running system may have extraneous files
    that is to say, files that are *not* present in the bootstrapfs
    and that can impact/clobber the resulting upgrade
    
    Expect the following variables from the store:
    SYSIMG_PATH          the path where the system image will be mounted
    PARTITIONS           dictionary of generic part. types (root/swap)
                         and their associated devices.
    NODE_ID              the id of this machine
    
    Sets the following variables:
    TEMP_BOOTCD_PATH     where the boot cd is remounted in the temp
                         path
    ROOT_MOUNTED         set to 1 when the the base logical volumes
                         are mounted.
    """

    log.write("\n\nStep: Install: bootstrapfs tarball (upgrade={}).\n".format(
        upgrade))

    # make sure we have the variables we need
    try:
        SYSIMG_PATH = vars["SYSIMG_PATH"]
        if SYSIMG_PATH == "":
            raise ValueError("SYSIMG_PATH")

        PARTITIONS = vars["PARTITIONS"]
        if PARTITIONS is None:
            raise ValueError("PARTITIONS")

        NODE_ID = vars["NODE_ID"]
        if NODE_ID == "":
            raise ValueError("NODE_ID")

        VERSION = vars['VERSION'] or 'unknown'

    except KeyError as var:
        raise BootManagerException(
            "Missing variable in vars: {}\n".format(var))
    except ValueError as var:
        raise BootManagerException(
            "Variable in vars, shouldn't be: {}\n".format(var))

    try:
        # make sure the required partitions exist
        val = PARTITIONS["root"]
        val = PARTITIONS["swap"]
        val = PARTITIONS["vservers"]
    except KeyError as part:
        log.write("Missing partition in PARTITIONS: {}\n".format(part))
        return 0

    bs_request = BootServerRequest.BootServerRequest(vars)

    # in upgrade mode, since we skip InstallPartitionDisks
    # we need to run this
    if upgrade:
        log.write("Upgrade mode init : Scanning for devices\n")
        systeminfo.get_block_devices_dict(vars, log)
        utils.sysexec_noerr("vgscan --mknodes", log)
        utils.sysexec_noerr("vgchange -ay", log)

    # debugging info - show in either mode
    utils.display_disks_status(PARTITIONS, "In InstallBootstrapFS", log)

    utils.breakpoint("we need to make /dev/mapper/* appear")

    log.write("turning on swap space\n")
    utils.sysexec("swapon {}".format(PARTITIONS["swap"]), log)

    # make sure the sysimg dir is present
    utils.makedirs(SYSIMG_PATH)

    log.write("mounting root file system\n")
    utils.sysexec(
        "mount -t ext3 {} {}".format(PARTITIONS["root"], SYSIMG_PATH), log)

    fstype = 'ext3' if vars['virt'] == 'vs' else 'btrfs'

    one_partition = vars['ONE_PARTITION'] == '1'

    if (not one_partition):
        log.write("mounting vserver partition in root file system (type {})\n".
                  format(fstype))
        utils.makedirs(SYSIMG_PATH + "/vservers")
        utils.sysexec("mount -t {} {} {}/vservers"\
                      .format(fstype, PARTITIONS["vservers"], SYSIMG_PATH), log)

        #if vars['virt']=='lxc':
        # NOTE: btrfs quota is supported from version: >= btrfs-progs-0.20 (f18+)
        #       older versions will not recongize the 'quota' command.
        #log.write("Enabling btrfs quota on {}/vservers\n".format(SYSIMG_PATH))
        #utils.sysexec_noerr("btrfs quota enable {}/vservers".format(SYSIMG_PATH))

    vars['ROOT_MOUNTED'] = 1

    # this is now retrieved in GetAndUpdateNodeDetails
    nodefamily = vars['nodefamily']
    extensions = vars['extensions']

    # in upgrade mode: we need to cleanup the disk to make
    # it safe to just untar the new bootstrapfs tarball again
    # on top of the hard drive
    if upgrade:
        CleanupSysimgBeforeUpgrade(SYSIMG_PATH, nodefamily, log)

    # the 'plain' option is for tests mostly
    plain = vars['plain']
    if plain:
        download_suffix = ".tar"
        uncompress_option = ""
        log.write("Using plain bootstrapfs images\n")
    else:
        download_suffix = ".tar.bz2"
        uncompress_option = "-j"
        log.write("Using compressed bootstrapfs images\n")

    log.write("Using nodefamily={}\n".format(nodefamily))
    if not extensions:
        log.write("Installing only core software\n")
    else:
        log.write("Requested extensions {}\n".format(extensions))

    bootstrapfs_names = [nodefamily] + extensions

    for name in bootstrapfs_names:
        tarball = "bootstrapfs-{}{}".format(name, download_suffix)
        source_file = "/boot/{}".format(tarball)
        dest_file = "{}/{}".format(SYSIMG_PATH, tarball)

        source_hash_file = "/boot/{}.sha1sum".format(tarball)
        dest_hash_file = "{}/{}.sha1sum".format(SYSIMG_PATH, tarball)

        time_beg = time.time()
        log.write("downloading {}\n".format(source_file))
        # 120 is the connect timeout, 86400 is the max transfer time in
        # seconds (1 day)
        result = bs_request.DownloadFile(source_file, None, None, 1, 1,
                                         dest_file, 120, 86400)
        time_end = time.time()
        duration = int(time_end - time_beg)
        log.write("Done downloading ({} seconds)\n".format(duration))
        if result:
            # Download SHA1 checksum file
            log.write("downloading sha1sum for {}\n".format(source_file))
            result = bs_request.DownloadFile(source_hash_file, None, None, 1,
                                             1, dest_hash_file, 120, 86400)

            log.write("verifying sha1sum for {}\n".format(source_file))
            if not utils.check_file_hash(dest_file, dest_hash_file):
                raise BootManagerException(
                    "FATAL: SHA1 checksum does not match between {} and {}"\
                    .format(source_file, source_hash_file))

            time_beg = time.time()
            log.write("extracting {} in {}\n".format(dest_file, SYSIMG_PATH))
            result = utils.sysexec(
                "tar -C {} -xpf {} {}".format(SYSIMG_PATH, dest_file,
                                              uncompress_option), log)
            time_end = time.time()
            duration = int(time_end - time_beg)
            log.write("Done extracting ({} seconds)\n".format(duration))
            utils.removefile(dest_file)
        else:
            # the main tarball is required
            if name == nodefamily:
                raise BootManagerException(
                    "FATAL: Unable to download main tarball {} from server."\
                    .format(source_file))
            # for extensions, just issue a warning
            else:
                log.write(
                    "WARNING: tarball for extension {} not found\n".format(
                        name))

    # copy resolv.conf from the base system into our temp dir
    # so DNS lookups work correctly while we are chrooted
    log.write("Copying resolv.conf to temp dir\n")
    utils.sysexec("cp /etc/resolv.conf {}/etc/".format(SYSIMG_PATH), log)

    # Copy the boot server certificate(s) and GPG public key to
    # /usr/boot in the temp dir.
    log.write("Copying boot server certificates and public key\n")

    if os.path.exists("/usr/boot"):
        # do nothing in case of upgrade
        if not os.path.exists(SYSIMG_PATH + "/usr/boot"):
            utils.makedirs(SYSIMG_PATH + "/usr")
            shutil.copytree("/usr/boot", SYSIMG_PATH + "/usr/boot")
    elif os.path.exists("/usr/bootme"):
        # do nothing in case of upgrade
        if not os.path.exists(SYSIMG_PATH + "/usr/bootme"):
            utils.makedirs(SYSIMG_PATH + "/usr/boot")
            boot_server = open("/usr/bootme/BOOTSERVER").readline().strip()
            shutil.copy("/usr/bootme/cacert/" + boot_server + "/cacert.pem",
                        SYSIMG_PATH + "/usr/boot/cacert.pem")
            open(SYSIMG_PATH + "/usr/boot/boot_server", "w").write(boot_server)
            shutil.copy("/usr/bootme/pubring.gpg",
                        SYSIMG_PATH + "/usr/boot/pubring.gpg")

    # For backward compatibility
    if os.path.exists("/usr/bootme"):
        # do nothing in case of upgrade
        if not os.path.exists(SYSIMG_PATH + "/mnt/cdrom/bootme"):
            utils.makedirs(SYSIMG_PATH + "/mnt/cdrom")
            shutil.copytree("/usr/bootme", SYSIMG_PATH + "/mnt/cdrom/bootme")

    # ONE_PARTITION => new distribution type
    if (vars['ONE_PARTITION'] != '1'):
        # Import the GPG key into the RPM database so that RPMS can be verified
        utils.makedirs(SYSIMG_PATH + "/etc/pki/rpm-gpg")
        # see also myplc/plc.d/gpg
        utils.sysexec("type -p gpg1 >& /dev/null && GPG=gpg1 || GPG=gpg; "
                      "$GPG --homedir=/root --export --armor"
                      " --no-default-keyring --keyring {}/usr/boot/pubring.gpg"
                      " > {}/etc/pki/rpm-gpg/RPM-GPG-KEY-planetlab".format(
                          SYSIMG_PATH, SYSIMG_PATH),
                      log,
                      shell=True)
        utils.sysexec_chroot(
            SYSIMG_PATH, "rpm --import /etc/pki/rpm-gpg/RPM-GPG-KEY-planetlab",
            log)

    # keep a log on the installed hdd
    stamp = open(SYSIMG_PATH + "/bm-install.txt", 'a')
    now = time.strftime("%Y-%b-%d @ %H:%M %Z", time.gmtime())
    stamp.write("Hard drive installed by BootManager {}\n".format(VERSION))
    stamp.write("Finished extraction of bootstrapfs on {}\n".format(now))
    # do not modify this, the upgrade code uses this line for checking compatibility
    stamp.write("Using nodefamily {}\n".format(nodefamily))
    stamp.close()

    return 1
Exemple #48
0
def main(args):

    save_folder = '%s_%s' % (args.dataset, args.affix)

    log_folder = os.path.join(args.log_root, save_folder)
    model_folder = os.path.join(args.model_root, save_folder)

    makedirs(log_folder)
    makedirs(model_folder)

    setattr(args, 'log_folder', log_folder)
    setattr(args, 'model_folder', model_folder)

    logger = create_logger(log_folder, args.todo, 'info')

    print_args(args, logger)

    if args.nexpa_train:
        from model.l2_nonexpansive_model import Model
    else:
        from model.model import Model
    model = Model(i_c=1, n_c=10)

    attack = FastGradientSignUntargeted(model,
                                        args.epsilon,
                                        args.alpha,
                                        min_val=0,
                                        max_val=1,
                                        max_iters=args.k,
                                        _type=args.perturbation_type)

    if torch.cuda.is_available():
        model.cuda()

    trainer = Trainer(args, logger, attack)

    if args.todo == 'train':
        tr_dataset = tv.datasets.MNIST(args.data_root,
                                       train=True,
                                       transform=tv.transforms.ToTensor(),
                                       download=True)

        tr_loader = DataLoader(tr_dataset,
                               batch_size=args.batch_size,
                               shuffle=True,
                               num_workers=4)

        # evaluation during training
        te_dataset = tv.datasets.MNIST(args.data_root,
                                       train=False,
                                       transform=tv.transforms.ToTensor(),
                                       download=True)

        te_loader = DataLoader(te_dataset,
                               batch_size=args.batch_size,
                               shuffle=False,
                               num_workers=4)

        trainer.train(model, tr_loader, te_loader, args.adv_train)
    elif args.todo == 'test':
        pass
    else:
        raise NotImplementedError
Exemple #49
0
def inner(path):
    root, dirname, basename = utils.tripple(from_directory, path)
    destination = os.path.join(to_directory, dirname, basename)
    utils.makedirs(os.path.join(to_directory, dirname))
    shutil.copy(path, destination)
    return {'files': [destination]}
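
The snippet above leans on utils.tripple, whose definition is not shown. A hedged guess at its behavior, assuming it splits a path into the source root, the directory part relative to that root, and the file name:

import os


def tripple(from_directory, path):
    # Assumed behavior: return (root, relative dirname, basename) for the given path.
    relative = os.path.relpath(path, from_directory)
    return from_directory, os.path.dirname(relative), os.path.basename(relative)


print(tripple("/src", "/src/a/b/c.txt"))  # ('/src', 'a/b', 'c.txt')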
Exemple #50
0
def Run(vars, log):
    """
    See if a node installation is valid. More checks should certainly be
    done in the future, but for now, make sure that the sym links kernel-boot
    exist in /boot
    
    Expect the following variables to be set:
    SYSIMG_PATH              the path where the system image will be mounted
                             (always starts with TEMP_PATH)
    ROOT_MOUNTED             the node root file system is mounted
    NODE_ID                  The db node_id for this machine
    PLCONF_DIR               The directory to store the configuration file in
    
    Set the following variables upon successfully running:
    ROOT_MOUNTED             the node root file system is mounted
    """

    log.write("\n\nStep: Validating node installation.\n")

    # make sure we have the variables we need
    try:
        SYSIMG_PATH = vars["SYSIMG_PATH"]
        if SYSIMG_PATH == "":
            raise ValueError("SYSIMG_PATH")

        NODE_ID = vars["NODE_ID"]
        if NODE_ID == "":
            raise ValueError("NODE_ID")

        PLCONF_DIR = vars["PLCONF_DIR"]
        if PLCONF_DIR == "":
            raise ValueError("PLCONF_DIR")
        
        NODE_MODEL_OPTIONS = vars["NODE_MODEL_OPTIONS"]

        PARTITIONS = vars["PARTITIONS"]
        if PARTITIONS is None:
            raise ValueError("PARTITIONS")

    except KeyError as var:
        raise BootManagerException("Missing variable in vars: {}\n".format(var))
    except ValueError as var:
        raise BootManagerException("Variable in vars, shouldn't be: {}\n".format(var))


    ROOT_MOUNTED = 0
    if 'ROOT_MOUNTED' in vars:
        ROOT_MOUNTED = vars['ROOT_MOUNTED']

    # mount the root system image if we haven't already.
    # capture BootManagerExceptions during the vgscan/change and mount
    # calls, so we can return 0 instead
    if ROOT_MOUNTED == 0:
            
        # simply creating an instance of this class and listing the system
        # block devices will make them show up so vgscan can find the planetlab
        # volume group
        systeminfo.get_block_devices_dict(vars, log)

        try:
            utils.sysexec("vgscan", log)
            utils.sysexec("vgchange -ay planetlab", log)
        except BootManagerException as e:
            log.write("BootManagerException during vgscan/vgchange: {}\n".format(e))
            return 0
            
        utils.makedirs(SYSIMG_PATH)

        # xxx - TODO - need to fsck the btrfs partition
        if vars['virt'] == 'vs':
            filesystems_tocheck = ['root', 'vservers']
        else:
            filesystems_tocheck = ['root']

        for filesystem in filesystems_tocheck:
            try:
                # first run fsck to prevent fs corruption from hanging mount...
                log.write("fsck {} file system\n".format(filesystem))
                utils.sysexec("e2fsck -v -p {}".format(PARTITIONS[filesystem]), log, fsck=True)
            except BootManagerException as e:
                log.write("BootManagerException during fsck of {} ({}) filesystem : {}\n"\
                          .format(filesystem, PARTITIONS[filesystem], str(e)))
                try:
                    log.write("Trying to recover filesystem errors on {}\n".format(filesystem))
                    utils.sysexec("e2fsck -v -y {}".format(PARTITIONS[filesystem]), log, fsck=True)
                except BootManagerException as e:
                    log.write("BootManagerException while trying to recover"
                              "filesystem errors on {} ({}) filesystem : {}\n"
                              .format(filesystem, PARTITIONS[filesystem], str(e)))
                    return -1
            else:
                # disable time/count based filesystems checks
                utils.sysexec_noerr("tune2fs -c -1 -i 0 {}".format(PARTITIONS[filesystem]), log)

        try:
            # then attempt to mount them
            log.write("mounting root file system\n")
            utils.sysexec("mount -t ext3 {} {}".format(PARTITIONS["root"], SYSIMG_PATH),log)
        except BootManagerException as e:
            log.write("BootManagerException during mount of /root: {}\n".format(str(e)))
            return -2
            
        try:
            PROC_PATH = "{}/proc".format(SYSIMG_PATH)
            utils.makedirs(PROC_PATH)
            log.write("mounting /proc\n")
            utils.sysexec("mount -t proc none {}".format(PROC_PATH), log)
        except BootManagerException as e:
            log.write("BootManagerException during mount of /proc: {}\n".format(str(e)))
            return -2


        one_partition = vars['ONE_PARTITION']=='1'

        if (not one_partition):
            try:
                VSERVERS_PATH = "{}/vservers".format(SYSIMG_PATH)
                utils.makedirs(VSERVERS_PATH)
                log.write("mounting vservers partition in root file system\n")
                if vars['virt'] == 'vs':
                    utils.sysexec("mount -t ext3 {} {}".format(PARTITIONS["vservers"], VSERVERS_PATH), log)
                else:
                    utils.sysexec("mount -t btrfs {} {}".format(PARTITIONS["vservers"], VSERVERS_PATH), log)
            except BootManagerException as e:
                log.write("BootManagerException while mounting /vservers: {}\n".format(str(e)))
                return -2

        ROOT_MOUNTED = 1
        vars['ROOT_MOUNTED'] = 1
        
    # check if the base kernel is installed 
    # these 2 links are created by our kernel's post-install scriplet
    log.write("Checking for a custom kernel\n")
    try:
        if vars['virt'] == 'vs':
            os.stat("{}/boot/kernel-boot".format(SYSIMG_PATH))
        else:
            try:
                kversion = os.popen("chroot {} rpm -qa kernel | tail -1 | cut -c 8-"\
                                    .format(SYSIMG_PATH)).read().rstrip()
                os.stat("{}/boot/vmlinuz-{}".format(SYSIMG_PATH, kversion))
                major_version = int(kversion[0]) # Check if the string looks like a kernel version
            except:
                kversion = os.popen("ls -lrt {}/lib/modules | tail -1 | awk '{{print $9;}}'"\
                                    .format(SYSIMG_PATH)).read().rstrip()
    except OSError as e:            
        log.write("Couldn't locate base kernel (you might be using the stock kernel).\n")
        return -3

    # check if the model specified kernel is installed
    option = ''
    if NODE_MODEL_OPTIONS & ModelOptions.SMP:
        option = 'smp'
        try:
            os.stat("{}/boot/kernel-boot{}".format(SYSIMG_PATH, option))
        except OSError as e:
            # smp kernel is not there; remove option from modeloptions
            # such that the rest of the code base thinks we are just
            # using the base kernel.
            NODE_MODEL_OPTIONS = NODE_MODEL_OPTIONS & ~ModelOptions.SMP
            vars["NODE_MODEL_OPTIONS"] = NODE_MODEL_OPTIONS
            log.write("WARNING: Couldn't locate smp kernel.\n")
            
    # write out the node id to /etc/planetlab/node_id. if this fails, return
    # 0, indicating the node isn't a valid install.
    try:
        node_id_file_path = "{}/{}/node_id".format(SYSIMG_PATH, PLCONF_DIR)
        node_id_file = open(node_id_file_path, "w")
        node_id_file.write(str(NODE_ID))
        node_id_file.close()
        node_id_file = None
        log.write("Updated /etc/planetlab/node_id\n")
    except IOError as e:
        log.write("Unable to write out /etc/planetlab/node_id\n")
        return 0

    log.write("Node installation appears to be ok\n")
    
    return 1
Exemple #51
0
def varsim_multi(reference,
                 simulator,
                 simulator_exe,
                 total_coverage,
                 variant_vcfs=[],
                 sampling_vcf=None,
                 dgv_file=None,
                 regions=None,
                 randvcf_options=None,
                 randdgv_options=None,
                 nlanes=1,
                 simulator_options="",
                 samples=[],
                 out_dir="out",
                 sv_insert_seq=None,
                 seed=0,
                 sex="MALE",
                 remove_filtered=False,
                 keep_temp=False,
                 force_five_base_encoding=False,
                 lift_ref=False,
                 disable_vcf2diploid=False,
                 samples_random=0):
    logger = logging.getLogger(varsim_multi.__name__)

    makedirs([out_dir])

    restricted_dir = os.path.join(out_dir, "restricted")

    restricted_reference, restricted_vcfs = gen_restricted_ref_and_vcfs(
        reference,
        variant_vcfs,
        regions,
        samples,
        restricted_dir,
        flank=0,
        short_contig_names=False)
    dgv_vcf = None

    if dgv_file:
        assert sv_insert_seq, "SV insertion sequence file is required."
        dgv_vcf_dir = os.path.join(out_dir, "tmp")
        makedirs([dgv_vcf_dir])
        dgv_vcf = os.path.join(dgv_vcf_dir, "dgv.vcf")
        makedirs([os.path.join(out_dir, "log")])
        dgv_err_file = os.path.join(out_dir, "log", "dgv2vcf.err")
        randdgv_options2vcf = copy.copy(randdgv_options)
        randdgv_options2vcf.output_all = "-all"
        with open(dgv_vcf, "w") as dgv2vcf_out, open(dgv_err_file,
                                                     "w") as dgv2vcf_log:
            run_randdgv(dgv_file, dgv2vcf_out, dgv2vcf_log, seed, sex,
                        randdgv_options2vcf, reference, sv_insert_seq)

    if regions:
        merged_bed = os.path.join(out_dir, "merged.bed")
        pybedtools.BedTool(regions).merge().saveas(merged_bed)
        restricted_dir = os.path.join(out_dir, "region_restricted")
        if sampling_vcf:
            _, [restricted_sampling_vcf
                ] = gen_restricted_ref_and_vcfs(reference, [sampling_vcf],
                                                merged_bed, [],
                                                restricted_dir,
                                                flank=0)
            # Now lift over the restricted_sampling_vcf to get the region-limited VCF
            sampling_vcf = lift_vcfs([restricted_sampling_vcf],
                                     os.path.join(
                                         restricted_dir,
                                         "region-restricted-sampling.vcf"),
                                     reference)
        if dgv_vcf:
            convertCN([dgv_vcf], "two2one")
            dgv_vcf = sort_and_compress(dgv_vcf)
            _, [restricted_dgv_vcf
                ] = gen_restricted_ref_and_vcfs(reference, [dgv_vcf],
                                                merged_bed, [],
                                                restricted_dir,
                                                flank=0)
            # Now lift over the restricted_dgv_vcf to get the region-limited VCF
            dgv_vcf = lift_vcfs([restricted_dgv_vcf],
                                os.path.join(restricted_dir,
                                             "region-restricted-dgv.vcf"),
                                reference)

    all_samples = samples + ["VarSim%d" % i for i in xrange(samples_random)]

    for index, (sample, coverage) in enumerate(zip(all_samples,
                                                   total_coverage)):
        sample_dir = os.path.join(out_dir, sample)
        sample_seed = seed + 1000 * index
        makedirs([sample_dir])
        logger.info("Simulating sample {} in {}".format(sample, sample_dir))
        sample_variant_vcfs = list(
            restricted_vcfs if index < len(samples) else [])

        # Run RandVCF first to get the sampled variants for the sample
        if randvcf_options and sampling_vcf:
            sampled_vcf = os.path.join(sample_dir, "randvcf.vcf")
            with open(sampled_vcf, "w") as randvcf_out, open(
                    os.path.join(sample_dir, "randvcf.err"),
                    "w") as randvcf_log:
                run_randvcf(sampling_vcf, randvcf_out, randvcf_log,
                            sample_seed, sex, randvcf_options, reference)
            sampled_vcf = sort_and_compress(sampled_vcf)
            # Now generate the restricted sampled VCF for the sample
            _, [restricted_sampled_vcf] = gen_restricted_ref_and_vcfs(
                reference, [sampled_vcf],
                regions, [],
                os.path.join(sample_dir, "restricted_randvcf"),
                flank=0)
            sample_variant_vcfs = sample_variant_vcfs + [
                restricted_sampled_vcf
            ]

        if randdgv_options and dgv_vcf:
            sampled_dgv_vcf = os.path.join(sample_dir, "randdgvvcf.vcf")
            randdgvvcf_options = randdgv_options2randvcf_options(
                randdgv_options)
            with open(sampled_dgv_vcf, "w") as randdgvvcf_out, open(
                    os.path.join(sample_dir, "randdgvvcf.err"),
                    "w") as randdgvvcf_log:
                run_randvcf(dgv_vcf, randdgvvcf_out, randdgvvcf_log,
                            sample_seed, sex, randdgvvcf_options, reference)
            sampled_dgv_vcf = sort_and_compress(sampled_dgv_vcf)
            # Now generate the restricted sampled dgv VCF for the sample
            _, [restricted_sampled_dgv_vcf] = gen_restricted_ref_and_vcfs(
                reference, [sampled_dgv_vcf],
                regions, [],
                os.path.join(sample_dir, "restricted_randdgvvcf"),
                flank=0)
            convertCN([restricted_sampled_dgv_vcf], "one2two")
            sample_variant_vcfs = sample_variant_vcfs + [
                restricted_sampled_dgv_vcf
            ]

        varsim_main(restricted_reference, simulator, simulator_exe, coverage,
                    sample_variant_vcfs, None, dgv_file, None, randdgv_options,
                    nlanes, simulator_options, sample,
                    os.path.join(sample_dir, "log"),
                    os.path.join(sample_dir, "out"), sv_insert_seq,
                    sample_seed, sex, remove_filtered, keep_temp,
                    force_five_base_encoding, lift_ref, disable_vcf2diploid)

    with open(os.path.join(out_dir, "samples.txt"), "w") as samples_fd:
        samples_fd.write("\n".join(all_samples))
Exemple #52
0
def predict(
        ckpt_path,
        should_predict_captions: bool = False,
        # metrics: Mapping[str, Callable] = {
        #     'rc': rc,
        # },
        num_workers: int = 20,
        use_gpu: bool = True,
        model_name: str = "frames",
        dset_name: str = "memento_frames",
        batch_size: int = 1,
        preds_savepath: Optional[str] = None,
        use_val: bool = False,
        debug_n: Optional[int] = None,
        n_mem_repetitions=5):

    print("ckpt path: {}".format(ckpt_path))

    if preds_savepath is None:
        fname = "_" + ("captions"
                       if should_predict_captions else "mems") + ".json"
        preds_savepath = os.path.splitext(
            ckpt_path.replace(cfg.CKPT_DIR, cfg.PREDS_DIR))[0] + fname
        utils.makedirs([os.path.dirname(preds_savepath)])
    print("preds savepath: {}".format(preds_savepath))

    device = utils.set_device()
    print('DEVICE', device)

    # load the ckpt
    print("Loading model from path: {}".format(ckpt_path))
    ckpt = torch.load(ckpt_path)

    # model
    model = get_model(model_name, device)
    model = nn.DataParallel(model)
    model.load_state_dict(ckpt['model_state_dict'])

    model.to(device)
    model.eval()

    print("USING MODEL TYPE {} ON DSET {}".format(model_name, dset_name))

    # data loader
    use_augmentations = (not should_predict_captions) and (n_mem_repetitions >
                                                           1)
    print("Use augmentations?", use_augmentations)
    test_transforms = VIDEO_TRAIN_TRANSFORMS if use_augmentations else VIDEO_TEST_TRANSFORMS
    train, val, test = get_dataset(dset_name, test_transforms=test_transforms)
    ds = val if use_val else test

    if ds is None:
        raise ValueError("No {} set available for this dataset.".format(
            "val" if use_val else "test"))
    ordered_fnames = ds.get_fnames()

    if debug_n is not None:
        ds = Subset(ds, range(debug_n))

    dl = DataLoader(ds,
                    batch_size=batch_size,
                    shuffle=False,
                    num_workers=num_workers)

    # either do mem scores or captions

    if should_predict_captions:
        # load the vocab embedding
        vocab_embedding = cap_utils.get_vocab_embedding()

        # load the vocab itself
        word2idx, idx2word = cap_utils.index_vocab()

        calc_captions(model, dl, ds, batch_size, device, vocab_embedding,
                      idx2word, preds_savepath, ordered_fnames)

    else:
        _calc_mem_scores(model,
                         ckpt_path,
                         dl,
                         ds,
                         batch_size,
                         device,
                         preds_savepath,
                         n_times=n_mem_repetitions)
Exemple #53
0
    parser.add_argument('--burn_in', type=int, default=2000)
    parser.add_argument('--arch', default='mlp', choices=["mlp", "mlp-large"])

    args = parser.parse_args()
    if args.data == "mnist" or args.data == "fashionmnist":
        args.data_dim = 784
        args.data_shape = (1, 28, 28)

    sqrt = lambda x: int(torch.sqrt(torch.Tensor([x])))
    plot = lambda p, x: tv.utils.save_image(
        x.clamp(0, 1), p, normalize=False, nrow=sqrt(x.size(0)))

    dload_train, dload_test = get_data(args)

    # logger
    utils.makedirs(args.save)
    logger = utils.get_logger(logpath=os.path.join(args.save, 'logs'),
                              filepath=os.path.abspath(__file__))

    logger.info(args)

    if args.arch == "mlp":
        if args.quadratic:
            net = networks.QuadraticMLP(args.data_dim, n_hid=args.hidden_dim)
        else:
            net = networks.SmallMLP(args.data_dim,
                                    n_hid=args.hidden_dim,
                                    dropout=args.dropout)

        critic = networks.SmallMLP(args.data_dim,
                                   n_out=args.data_dim,
Exemple #54
0
def make_result_pages(extended_test_results, expected_results_file,
                      failures_markup_file, tag, run_date, comment_file,
                      output_dir, reports, warnings):

    utils.log('Producing the reports...')
    __log__ = 1

    warnings_text = '+'.join(warnings)

    if comment_file != '':
        comment_file = os.path.abspath(comment_file)

    links = os.path.join(output_dir, 'links.html')

    utils.makedirs(os.path.join(output_dir, 'output'))
    for mode in ('developer', 'user'):
        utils.makedirs(os.path.join(output_dir, mode, 'output'))

    issues = os.path.join(output_dir, 'developer', 'issues.html')
    if 'i' in reports:
        utils.log('    Making issues list...')
        utils.libxslt(
            utils.log, extended_test_results, xsl_path('issues_page.xsl'),
            issues, {
                'source': tag,
                'run_date': run_date,
                'warnings': warnings_text,
                'comment_file': comment_file,
                'expected_results_file': expected_results_file,
                'explicit_markup_file': failures_markup_file,
                'release': "yes"
            })

    for mode in ('developer', 'user'):
        if mode[0] + 'd' in reports:
            utils.log('    Making detailed %s  report...' % mode)
            utils.libxslt(
                utils.log, extended_test_results, xsl_path('result_page.xsl'),
                os.path.join(output_dir, mode, 'index.html'), {
                    'links_file': 'links.html',
                    'mode': mode,
                    'source': tag,
                    'run_date': run_date,
                    'warnings': warnings_text,
                    'comment_file': comment_file,
                    'expected_results_file': expected_results_file,
                    'explicit_markup_file': failures_markup_file
                })

    for mode in ('developer', 'user'):
        if mode[0] + 's' in reports:
            utils.log('    Making summary %s  report...' % mode)
            utils.libxslt(
                utils.log, extended_test_results, xsl_path('summary_page.xsl'),
                os.path.join(output_dir, mode, 'summary.html'), {
                    'mode': mode,
                    'source': tag,
                    'run_date': run_date,
                    'warnings': warnings_text,
                    'comment_file': comment_file,
                    'explicit_markup_file': failures_markup_file
                })

    for mode in ('developer', 'user'):
        if mode[0] + 'dr' in reports:
            utils.log('    Making detailed %s release report...' % mode)
            utils.libxslt(
                utils.log, extended_test_results, xsl_path('result_page.xsl'),
                os.path.join(output_dir, mode, 'index_release.html'), {
                    'links_file': 'links.html',
                    'mode': mode,
                    'source': tag,
                    'run_date': run_date,
                    'warnings': warnings_text,
                    'comment_file': comment_file,
                    'expected_results_file': expected_results_file,
                    'explicit_markup_file': failures_markup_file,
                    'release': "yes"
                })

    for mode in ('developer', 'user'):
        if mode[0] + 'sr' in reports:
            utils.log('    Making summary %s release report...' % mode)
            utils.libxslt(
                utils.log, extended_test_results, xsl_path('summary_page.xsl'),
                os.path.join(output_dir, mode, 'summary_release.html'), {
                    'mode': mode,
                    'source': tag,
                    'run_date': run_date,
                    'warnings': warnings_text,
                    'comment_file': comment_file,
                    'explicit_markup_file': failures_markup_file,
                    'release': 'yes'
                })

    if 'e' in reports:
        utils.log('    Generating expected_results ...')
        utils.libxslt(utils.log, extended_test_results,
                      xsl_path('produce_expected_results.xsl'),
                      os.path.join(output_dir, 'expected_results.xml'))

    if 'n' in reports:
        utils.log('    Making runner comment files...')
        utils.libxslt(utils.log, extended_test_results,
                      xsl_path('runners.xsl'),
                      os.path.join(output_dir, 'runners.html'))

    shutil.copyfile(xsl_path('html/master.css'),
                    os.path.join(output_dir, 'master.css'))
Exemple #55
0
def build_from_path(config):
    warning("Sampling rate: {}".format(hparams.sample_rate))

    executor = ProcessPoolExecutor(max_workers=config.num_workers)
    futures = []
    index = 1

    base_dir = os.path.dirname(config.metadata_path)
    data_dir = os.path.join(base_dir, config.data_dirname)
    makedirs(data_dir)

    loss_coeff = defaultdict(one)
    if config.metadata_path.endswith("json"):
        with open(config.metadata_path, encoding="utf-8") as f:
            content = f.read()
        info = json.loads(content)
    elif config.metadata_path.endswith("csv"):
        with open(config.metadata_path) as f:
            info = {}
            for line in f:
                path, text = line.strip().split('|')
                info[path] = text
    else:
        raise Exception(" [!] Unkown metadata format: {}".format(
            config.metadata_path))

    new_info = {}
    for path in info.keys():
        if not os.path.exists(path):
            new_path = os.path.join(base_dir, path)
            if not os.path.exists(new_path):
                print(" [!] Audio not found: {}".format([path, new_path]))
                continue
        else:
            new_path = path

        new_info[new_path] = info[path]

    info = new_info

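    # Transcripts stored as a list are assumed to come from automatic recognition
    # with multiple candidates: they may get a reduced loss coefficient depending on
    # ignore_recognition_level, and only the first candidate is kept.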
    for path in info.keys():
        if isinstance(info[path], list):
            if (hparams.ignore_recognition_level == 1 and len(info[path]) == 1) or \
                    hparams.ignore_recognition_level == 2:
                loss_coeff[path] = hparams.recognition_loss_coeff

            info[path] = info[path][0]

    ignore_description = {
        0: "use all",
        1: "ignore only unmatched_alignment",
        2: "fully ignore recognitio",
    }

    print(" [!] Skip recognition level: {} ({})". \
            format(hparams.ignore_recognition_level,
                   ignore_description[hparams.ignore_recognition_level]))

    for audio_path, text in info.items():
        if hparams.ignore_recognition_level > 0 and loss_coeff[audio_path] != 1:
            continue

        if base_dir not in audio_path:
            audio_path = os.path.join(base_dir, audio_path)

        try:
            tokens = text_to_sequence(text)
        except Exception:
            # skip utterances whose transcript cannot be converted to a token sequence
            continue

        fn = partial(_process_utterance, audio_path, data_dir, tokens,
                     loss_coeff[audio_path])
        futures.append(executor.submit(fn))

    n_frames = [future.result() for future in tqdm(futures)]
    n_frames = [n_frame for n_frame in n_frames if n_frame is not None]

    hours = frames_to_hours(n_frames)

    print(' [*] Loaded metadata for {} examples ({:.2f} hours)'.format(
        len(n_frames), hours))
    print(' [*] Max length: {}'.format(max(n_frames)))
    print(' [*] Min length: {}'.format(min(n_frames)))

    plot_n_frames(n_frames, os.path.join(base_dir,
                                         "n_frames_before_filter.png"))

    min_n_frame = hparams.reduction_factor * hparams.min_iters
    max_n_frame = hparams.reduction_factor * hparams.max_iters - hparams.reduction_factor

    n_frames = [n for n in n_frames if min_n_frame <= n <= max_n_frame]
    hours = frames_to_hours(n_frames)

    print(' [*] After filtering: {} examples ({:.2f} hours)'.format(
        len(n_frames), hours))
    print(' [*] Max length: {}'.format(max(n_frames)))
    print(' [*] Min length: {}'.format(min(n_frames)))

    plot_n_frames(n_frames, os.path.join(base_dir,
                                         "n_frames_after_filter.png"))
Exemple #56
0
        experiment_name += f"_ot{args.tpp_otreg_strength}"
        if args.tpp_cond:
            experiment_name += "_cond"
    if args.share_hidden and args.model in ["jumpcnf", "attncnf"] \
            and args.tpp == "neural":
        experiment_name += "_sharehidden"
    if args.solve_reverse and args.model == "jumpcnf" and args.tpp == "neural":
        experiment_name += "_rev"
    experiment_name += f"_lr{args.lr}"
    experiment_name += f"_gc{args.gradclip}"
    experiment_name += f"_bsz{args.max_events}x{args.ngpus}_wd{args.weight_decay}_s{args.seed}"
    experiment_name += f"_{args.experiment_id}"
    savepath = os.path.join(args.experiment_dir, experiment_name)

    # Top-level logger for logging exceptions into the log file.
    utils.makedirs(savepath)
    logger = utils.get_logger(os.path.join(savepath, "logs"))

    if args.gradclip == 0:
        args.gradclip = 1e10

    try:
        mp.set_start_method("forkserver")
        mp.spawn(main,
                 args=(args.ngpus, args, savepath),
                 nprocs=args.ngpus,
                 join=True)
    except Exception:
        import traceback
        logger.error(traceback.format_exc())
        sys.exit(1)
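

# torch.multiprocessing.spawn passes the worker's process index as the first
# positional argument, so the `main` referenced above is expected to accept it
# before the explicitly supplied args. A sketch of the expected signature only
# (not the original definition):
def main_signature_sketch(rank, ngpus, args, savepath):
    # rank identifies this worker among the ngpus processes spawned above
    pass

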
def download_news_video_and_content(news_id,
                                    base_dir,
                                    chunk_size=32 * 1024,
                                    video_dir="video",
                                    asset_dir="assets",
                                    audio_dir="wavs"):

    video_dir = os.path.join(base_dir, video_dir)
    asset_dir = os.path.join(base_dir, asset_dir)
    audio_dir = os.path.join(base_dir, audio_dir)

    makedirs(video_dir)
    makedirs(asset_dir)
    makedirs(audio_dir)

    text_path = os.path.join(asset_dir, "{}.txt".format(news_id))
    original_text_path = os.path.join(asset_dir,
                                      "original-{}.txt".format(news_id))

    video_path = os.path.join(video_dir, "{}.ts".format(news_id))
    audio_path = os.path.join(audio_dir, "{}.wav".format(news_id))

    params = {
        'NJC': 'NJC400',
        'NID': news_id,  # NB11515152
        'CD': 'A0100',
    }
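    # BASE_URL and soupify are assumed to be defined elsewhere in the module; the
    # endpoint responds with XML carrying <article_contents> and <vod_path>
    # elements, which are parsed below.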

    response = requests.request(
        method='GET',
        url=BASE_URL,
        params=params,
    )
    soup = soupify(response.text)

    article_contents = soup.find_all('article_contents')

    assert len(article_contents) == 1, \
            "# of <article_contents> of {} should be 1: {}".format(news_id, response.text)

    text = soupify(article_contents[0].text).get_text()  # remove <div>

    with open(original_text_path, "w", encoding='utf-8') as f:
        f.write(text)

    with open(text_path, "w", encoding='utf-8') as f:
        from nltk import sent_tokenize

        text = re.sub(r'\[.{0,80} :\s.+]', '', text)  # remove quote
        text = re.sub(r'☞.+http.+\)', '', text)  # remove promotional link lines
        text = re.sub(r'\(https?:\/\/.*[\r\n]*\)', '', text)  # remove url

        sentences = sent_tokenize(text)
        sentences = [
            sent for sentence in sentences for sent in sentence.split('\n')
            if sent
        ]

        new_texts = []
        for sent in sentences:
            sent = sent.strip()
            sent = re.sub(r'\([^)]*\)', '', sent)
            #sent = re.sub(r'\<.{0,80}\>', '', sent)
            sent = sent.replace('…', '.')
            new_texts.append(sent)

        f.write("\n".join([sent for sent in new_texts if sent]))

    vod_paths = soup.find_all('vod_path')

    assert len(vod_paths) == 1, \
            "# of <vod_path> of {} should be 1: {}".format(news_id, response.text)

    if not os.path.exists(video_path):
        redirect_url = vod_paths[0].text

        list_url = m3u8.load(redirect_url).playlists[0].absolute_uri
        video_urls = [
            segment.absolute_uri for segment in m3u8.load(list_url).segments
        ]

        with open(video_path, "wb") as f:
            for url in video_urls:
                response = requests.get(url, stream=True)
                total_size = int(response.headers.get('content-length', 0))

                for chunk in response.iter_content(chunk_size):
                    if chunk:  # filter out keep-alive new chunks
                        f.write(chunk)

    if not os.path.exists(audio_path):
        encoder = get_encoder_name()
        command = "{} -y -loglevel panic -i {} -ab 160k -ac 2 -ar 44100 -vn {}".\
                format(encoder, video_path, audio_path)
        subprocess.call(command, shell=True)

    return True
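
# A minimal usage sketch (the directory name is a made-up example; "NB11515152" is
# the sample id quoted in the params comment above):
def _download_example():
    download_news_video_and_content("NB11515152", base_dir="./news_data")
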
    # constants
    ssh_source_files = "%s/debug_files/" % BM_SOURCE_DIR    
    ssh_dir = "/etc/ssh/"
    key_gen_prog = "ssh-keygen"
    ssh_home = "/root/.ssh"
    cancel_boot_flag = "/tmp/CANCEL_BOOT"
    sshd_started_flag = "/tmp/SSHD_RUNNING"

    # pre-sshd
    pre_sshd_script = os.path.join(ssh_source_files, "pre-sshd")
    if os.path.exists(pre_sshd_script):
        utils.sysexec_noerr(pre_sshd_script, log)
    
    # create host keys if needed
    if not os.path.isdir(ssh_dir):
        utils.makedirs(ssh_dir)

    # original code used to specify -b 1024 for the rsa1 key
    # fedora23 seems to come with a release of openssh that lacks suppport
    # for ssh1, and thus rsa1 keys; so we consider that failing to produce
    # the rsa1 key is not a showstopper
    key_specs = [
        ("/etc/ssh/ssh_host_key",     'rsa1', "SSH1 RSA", False),
        ("/etc/ssh/ssh_host_rsa_key", 'rsa',  "SSH2 RSA", True),
        ("/etc/ssh/ssh_host_dsa_key", 'dsa',  "SSH2 DSA", True),
    ]

    for key_file, key_type, label, mandatory in key_specs:
        if not os.path.exists(key_file):
            log.write("Creating {} host key {}\n".format(label, key_file))
            if mandatory:
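                # (The snippet is truncated at this point; presumably the key is
                #  generated with `ssh-keygen -q -t <type> -f <file> -N ''` through
                #  one of the utils.sysexec helpers, with a failure treated as fatal
                #  only when `mandatory` is set.)
                pass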
Exemple #59
0
                                         '.txt', '.original.mp3')
        out_path = os.path.join(base_dir, 'audio',
                                os.path.basename(d.text_path)).replace(
                                    '.txt', '.wav')

        options = {
            'format': 'bestaudio/best',
            'outtmpl': original_path,
            'postprocessors': [{
                'key': 'FFmpegExtractAudio',
                'preferredcodec': 'mp3',
                'preferredquality': '320',
            }],
        }
        with youtube_dl.YoutubeDL(options) as ydl:
            ydl.download([d.video_url])

        audio = AudioSegment.from_file(original_path)
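        # pydub AudioSegment slicing is in milliseconds, so d.start and d.end are
        # assumed to be millisecond offsets into the downloaded audio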
        audio[d.start:d.end].export(out_path, out_ext)

        remove_file(original_path)


if __name__ == '__main__':
    makedirs(os.path.join(base_dir, "audio"))

    data = read_csv(os.path.join(base_dir, "metadata.csv"))
    download_audio_with_urls(data)
Exemple #60
0
def evaluate(netWrapper, loader, history, epoch, args):
    print('Evaluating at {} epochs...'.format(epoch))
    torch.set_grad_enabled(False)
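    # note: this disables autograd globally rather than via a context manager, so
    # the caller is expected to re-enable gradients after evaluation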

    # remove previous viz results
    makedirs(args.vis, remove=True)

    # switch to eval mode
    netWrapper.eval()

    # initialize meters
    loss_meter = AverageMeter()
    sdr_mix_meter = AverageMeter()
    sdr_meter = AverageMeter()
    sir_meter = AverageMeter()
    sar_meter = AverageMeter()

    # initialize HTML header
    visualizer = HTMLVisualizer(os.path.join(args.vis, 'index.html'))
    header = ['Filename', 'Input Mixed Audio']
    for n in range(1, args.num_mix + 1):
        header += [
            'Video {:d}'.format(n), 'Predicted Audio {:d}'.format(n),
            'GroundTruth Audio {}'.format(n), 'Predicted Mask {}'.format(n),
            'GroundTruth Mask {}'.format(n)
        ]
    header += ['Loss weighting']
    visualizer.add_header(header)
    vis_rows = []

    for i, batch_data in enumerate(loader):
        # forward pass
        err, outputs = netWrapper.forward(batch_data, args)
        err = err.mean()

        loss_meter.update(err.item())
        print('[Eval] iter {}, loss: {:.4f}'.format(i, err.item()))

        # calculate metrics
        sdr_mix, sdr, sir, sar = calc_metrics(batch_data, outputs, args)
        sdr_mix_meter.update(sdr_mix)
        sdr_meter.update(sdr)
        sir_meter.update(sir)
        sar_meter.update(sar)

        # output visualization
        if len(vis_rows) < args.num_vis:
            output_visuals(vis_rows, batch_data, outputs, args)

    print('[Eval Summary] Epoch: {}, Loss: {:.4f}, '
          'SDR_mixture: {:.4f}, SDR: {:.4f}, SIR: {:.4f}, SAR: {:.4f}'.format(
              epoch, loss_meter.average(), sdr_mix_meter.average(),
              sdr_meter.average(), sir_meter.average(), sar_meter.average()))
    history['val']['epoch'].append(epoch)
    history['val']['err'].append(loss_meter.average())
    history['val']['sdr'].append(sdr_meter.average())
    history['val']['sir'].append(sir_meter.average())
    history['val']['sar'].append(sar_meter.average())

    print('Plotting html for visualization...')
    visualizer.add_rows(vis_rows)
    visualizer.write_html()

    # Plot figure
    if epoch > 0:
        print('Plotting figures...')
        plot_loss_metrics(args.ckpt, history)