Example #1
    def start_build_audio(self,
                          speech_paths,
                          mix_path,
                          clean_path,
                          noise_paths=None):
        for paths in tqdm(self.split_speech(speech_paths),
                          desc="Data building"):
            try:
                cleans = []
                for p in paths:
                    w, _ = librosa.load(p, sr=self.config.audio.sample_rate)
                    L = int(self.config.audio.sample_rate *
                            self.config.audio.len)
                    w = w[:L]
                    cleans.append([p, w])

                mix = None
                for i, w in enumerate(cleans):
                    if i == 0:
                        # Copy as an ndarray: list(w[1]) would make the
                        # "+=" below concatenate samples instead of summing.
                        mix = np.copy(w[1])
                    else:
                        mix += w[1]

                if noise_paths is not None:
                    mix = self.add_noise(mix, noise_paths)

                norm = np.max(np.abs(mix)) * 1.1
                mix /= norm
                for i in range(len(cleans)):
                    cleans[i][1] = cleans[i][1] / norm

                # Join the clean basenames with trailing dots so that
                # appending "npy" below yields e.g. "a.b.npy".
                mix_filename = ""
                for p in paths:
                    mix_filename += utils.basename(p) + "."
                np.save(("{}/{}npy").format(mix_path, mix_filename),
                        self.audio_handler.wav2spec(mix))

                for w in cleans:
                    clean_filename = utils.basename(w[0]) + ".npy"
                    np.save(("{}/{}").format(clean_path, clean_filename),
                            self.audio_handler.wav2spec(w[1]))

            except Exception:
                # Remove any partially written mix file for this batch.
                try:
                    os.remove(("{}/{}npy").format(mix_path, mix_filename))
                    print("[ERROR] remove " +
                          ("{}/{}npy").format(mix_path, mix_filename))
                except Exception:
                    pass
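Note: each of these projects ships its own utils.basename, but the way it is used here (Example #1 joins the results with "." to name the mix file, and Example #10 splits that name apart again) suggests a helper that strips both the directory and the extension. A minimal sketch under that assumption:

import os

def basename(path):
    # Hypothetical reconstruction, not any single project's code:
    # drop the directory and the last extension,
    # e.g. "/data/speech/p01_003.wav" -> "p01_003".
    return os.path.splitext(os.path.basename(path))[0]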
Example #2
    def constructTitle(self):
        smiley = ":-)" if self.status() == Result.NoError else ":-("

        return "{0}: {1} on {2} testing {3} - {4} {5}".format(
            self._revision, utils.basename(self._installerSourceLocation),
            self._vm.name(), self._testcase.name(), self.statusAsNiceString(),
            smiley)
Example #3
def fromVMRunAndPath(vmrun, path):
    config = ConfigParser.SafeConfigParser()
    config.read(path)

    hostType = utils.get_config_option(config, None, "type", "", "Host")
    hostLocation = utils.get_config_option(config, None, "location", "",
                                           "Host")
    hostUsername = utils.get_config_option(config, None, "username", "",
                                           "Host")
    hostPassword = utils.get_config_option(config, None, "password", "",
                                           "Host")

    vmxVal = utils.get_config_option(config, None, "vmx")
    if not len(hostType) == 0:
        vmx = utils.makeAbsolutePath(vmxVal, os.path.dirname(path))
    else:
        vmx = vmxVal

    username = utils.get_config_option(config, None, "username", "nokia")
    password = utils.get_config_option(config, None, "password", "nokia")
    tempDir = utils.get_config_option(config, None, "tempDir",
                                      "c:\\windows\\temp")
    ostype = utils.get_config_option(config, None, "os", "windows")

    vm = VirtualMachine(vmrun, vmx, username, password, tempDir, ostype)
    vm._name = utils.get_config_option(config, None, "name",
                                       utils.basename(path))
    vm._snapshot = utils.get_config_option(config, None, "snapshot", "base")
    vm._python = utils.get_config_option(config, None, "python",
                                         "c:/python26/python.exe")
    vm._hostType = hostType
    vm._hostLocation = hostLocation
    vm._hostUsername = hostUsername
    vm._hostPassword = hostPassword
    return vm
Example #4
    def __init__( self, path ):
        self._steps = []
        config = ConfigParser.SafeConfigParser()
        config.read( path )
        self._path = path
        self._platforms = []
        found = True
        stepNum = 0
        while found:
            sec = "Step{0}".format( stepNum )
            if not config.has_section( sec ):
                found = False
                continue

            stepNum += 1
            installscript = utils.makeAbsolutePath( utils.get_config_option( config, None, "installscript", None, sec ), os.path.dirname( path ) )
            checkerTestDir = utils.get_config_option( config, None, "checkerTestDir", None, sec )
            checkerTestDir = utils.makeAbsolutePath( checkerTestDir, os.path.dirname( path ) ) if checkerTestDir else ""
            timeout = int( utils.get_config_option( config, None, "timeout", 60 * 60, sec ) )
            self._steps.append( Step( installscript, checkerTestDir, timeout ) )

        self._name = utils.get_config_option( config, None, "name", utils.basename( path ) )
        self._targetDirectory = utils.get_config_option( config, None, "targetDirectory", "" )
        self._maintenanceToolLocation = utils.get_config_option( config, None, "maintenanceToolLocation", "" )
        platforms = utils.get_config_option( config, None, "platforms" )
        if platforms is not None:
            self._platforms = platforms.split( ',' )
Example #5
def fromVMRunAndPath( vmrun, path ):
    config = ConfigParser.SafeConfigParser()
    config.read( path )

    hostType = utils.get_config_option( config, None, "type", "", "Host" )
    hostLocation = utils.get_config_option( config, None, "location", "", "Host" )
    hostUsername = utils.get_config_option( config, None, "username", "", "Host" )
    hostPassword = utils.get_config_option( config, None, "password", "", "Host" )

    vmxVal = utils.get_config_option( config, None, "vmx" )
    if not len( hostType ) == 0:
        vmx = utils.makeAbsolutePath( vmxVal, os.path.dirname( path ) )
    else:
        vmx = vmxVal

    username = utils.get_config_option( config, None, "username", "nokia" )
    password = utils.get_config_option( config, None, "password", "nokia" )
    tempDir = utils.get_config_option( config, None, "tempDir", "c:\\windows\\temp" )
    ostype = utils.get_config_option( config, None, "os", "windows" )
    
    vm = VirtualMachine( vmrun, vmx, username, password, tempDir, ostype )
    vm._name = utils.get_config_option( config, None, "name", utils.basename( path ) )
    vm._snapshot = utils.get_config_option( config, None, "snapshot", "base" )
    vm._python = utils.get_config_option( config, None, "python", "c:/python26/python.exe" )
    vm._hostType = hostType
    vm._hostLocation = hostLocation
    vm._hostUsername = hostUsername
    vm._hostPassword = hostPassword
    return vm
Example #6
    def __init__( self, path ):
        self._steps = []
        config = ConfigParser.SafeConfigParser()
        config.read( path )
        self._path = path
        self._platforms = []
        found = True
        stepNum = 0
        while found:
            sec = "Step{0}".format( stepNum )
            if not config.has_section( sec ):
                found = False
                continue

            stepNum += 1
            installscript = utils.makeAbsolutePath( utils.get_config_option( config, None, "installscript", None, sec ), os.path.dirname( path ) )
            checkerTestDir = utils.get_config_option( config, None, "checkerTestDir", None, sec )
            checkerTestDir = utils.makeAbsolutePath( checkerTestDir, os.path.dirname( path ) ) if checkerTestDir else ""
            timeout = int( utils.get_config_option( config, None, "timeout", 60 * 60, sec ) )
            self._steps.append( Step( installscript, checkerTestDir, timeout ) )

        self._name = utils.get_config_option( config, None, "name", utils.basename( path ) )
        self._targetDirectory = utils.get_config_option( config, None, "targetDirectory", "" )
        self._maintenanceToolLocation = utils.get_config_option( config, None, "maintenanceToolLocation", "" )
        platforms = utils.get_config_option( config, None, "platforms" )
        if platforms is not None:
            self._platforms = platforms.split( ',' )
Example #7
File: audio.py Project: inkerra/python-vk
def download_audio_list(th_pool_sz, audio_list, path, lnks_path, ans=None):
    vk_names = {}
    unique = set()
    audios = []

    for audio, vk_name in zip(audio_list, map(artist_title, audio_list)):
        vk_names.setdefault(vk_name, 0)
        iname = get_id_name(audio)
        if not vk_names[vk_name] or iname not in unique:
            audios.append(Audio(audio, vk_name, iname, dbl=vk_names[vk_name]))
            vk_names[vk_name] += 1
        unique.add(iname)

    for audio in audios:
        if vk_names[audio.vk_name] > 1:
            audio.dbl += 1
        audio()

    e_audios = enumerate(audios)

    params = (len(audios), path, lnks_path)

    if th_pool_sz > 1:
        params_q = Queue.Queue()

        for i in range(th_pool_sz):
            th = DownloaderThread(i + 1, download_audio, params_q)
            th.setDaemon(True)
            th.start()

        for p in e_audios:
            params_q.put(((p, ) + params))

        params_q.join()
    else:
        for p in e_audios:
            download_audio(p, *params, m_th=False)

    logging.warning("All Done.")

    y_or_n = lambda ans: ans.strip()[:1].lower() if ans else ''

    if y_or_n(ans) == 'n':
        return

    files = glob(os.path.join(path, FILENAME_FMT % '*'))
    playlist = {audio.name for audio in audios}
    old = [f for f in files if utils.basename(f) not in playlist]

    if old:
        logging.info('Outdated audios:\n' + '\n'.join(old))
        if not ans:
            prompt = "Delete outdated audios?[%d] (y or n) [n] " % len(old)
            ans = raw_input(prompt)
        if y_or_n(ans) == 'y':
            for f in old:
                logging.debug("Deleting %s ...", f)
                os.remove(f)
            logging.warning("Deleted outdated files [%d].", len(old))
Example #8
    def run_individual_parameters(i,
                                  data_path,
                                  extension,
                                  save_folder,
                                  GRP=False,
                                  compute_p=True,
                                  **preprocessing_params):
        """
        Arguments:
            GRP -- True if processing the GRP data
        """
        files = utils.getAllFiles(data_path, extension)
        if GRP:
            file = files[0]
            filename = '{:02}'.format(
                i)  # Save the results by the subjects number
            subject_id = i
        else:
            file = files[i]
            filename = utils.basename(file)
            subject_id = None
        os.makedirs("Results/{}/AM/idx".format(save_folder), exist_ok=True)
        os.makedirs("Results/{}/AM/p_values".format(save_folder),
                    exist_ok=True)

        print("Processing", i, ":", filename)
        if os.path.exists('Results/{}/AM/p_values/{}.csv'.format(
                save_folder, filename)):
            exit()

        param_file = 'Results/{}/AIS/idx/{}.csv'.format(save_folder, filename)
        df, param_df = utils.loadData(file,
                                      get_params=True,
                                      param_file=param_file,
                                      subject_id=subject_id)
        data = utils.preprocess(df, **preprocessing_params)
        results, p_values = getLocalsForAllRegions(data,
                                                   param_df,
                                                   compute_p=compute_p)
        # Add back the trimmed sections
        padding = ((0, 0), (preprocessing_params.get('trim_start', 0),
                            preprocessing_params.get('trim_end', 0)))
        results = np.pad(results, padding, mode='constant', constant_values=0)

        pd.DataFrame(results).to_csv('Results/{}/AM/{}_AM.csv'.format(
            save_folder, filename),
                                     index=None,
                                     header=None)
        pd.DataFrame(p_values).to_csv('Results/{}/AM/p_values/{}.csv'.format(
            save_folder, filename),
                                      index=None,
                                      header=None)
        try:
            utils.plotHeatmap(results, divergent=True)
        except Exception:
            pass  # plotting is best-effort; ignore failures
Example #9
    def constructTitle(self):
        smiley = ":-)" if self.status() == Result.NoError else ":-("

        return "{0}: {1} on {2} testing {3} - {4} {5}".format(
            self._revision,
            utils.basename(self._installerSourceLocation),
            self._vm.name(),
            self._testcase.name(),
            self.statusAsNiceString(),
            smiley,
        )
Example #10
    def __getitem__(self, idx):
        mix_file = self.mix_list[idx]
        mix = torch.Tensor(np.load(mix_file))
        mix_filename = utils.basename(mix_file)
        target_filenames = mix_filename.split(".")

        targets = list()
        for filename in target_filenames:
            targets.append(
                utils.find_paths_contains(filename, self.target_list)[0])
        targets = list(map(lambda t: torch.Tensor(np.load(t)), targets))
        return mix, torch.stack(targets, dim=3), None
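This __getitem__ relies on the naming scheme from Example #1: the mix basename is the dot-joined list of clean-source names (the trailing "." written there is absorbed by the ".npy" extension, assuming utils.basename strips extensions as sketched after Example #1), so splitting on "." recovers the source names. utils.find_paths_contains is project-specific; a hypothetical version consistent with its use here would return every path whose file name contains the given substring:

import os

def find_paths_contains(name, paths):
    # Hypothetical helper: the caller above uses the first match.
    return [p for p in paths if name in os.path.basename(p)]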
Example #11
def fontforge_convert(fullpath):
	global fontforge_available
	if not fontforge_available:
		return False

	path, file = os.path.split(fullpath)
	target     = os.path.join(setup.svg_font_path, utils.basename(file)+'.svg')
	cmd = "fontforge -c 'Open($1); Generate($2)' %s %s" % (fullpath, target)
	exitstatus = os.system(cmd)
	if exitstatus:
		if exitstatus >> 8 == 127:
			fontforge_available = False
		return False
	else:
		return True
Example #12
def fontforge_convert(fullpath):
    global fontforge_available
    if not fontforge_available:
        return False

    path, file = os.path.split(fullpath)
    target = os.path.join(setup.svg_font_path, utils.basename(file) + '.svg')
    cmd = "fontforge -c 'Open($1); Generate($2)' %s %s" % (fullpath, target)
    exitstatus = os.system(cmd)
    if exitstatus:
        # os.system returns a wait status; the high byte holds the exit
        # code, and 127 means the shell could not find fontforge.
        if exitstatus >> 8 == 127:
            fontforge_available = False
        return False
    else:
        return True
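Both variants build the shell command with % formatting, so paths containing spaces or quotes would break the command line. A hedged sketch of the same call using an argument list via subprocess (assuming the same fontforge CLI, where $1 and $2 are bound to the trailing arguments):

import subprocess

def fontforge_convert_safe(fullpath, target):
    # Same fontforge invocation, but with an argument list so that
    # paths containing spaces or quotes are passed through intact.
    cmd = ["fontforge", "-c", "Open($1); Generate($2)", fullpath, target]
    return subprocess.run(cmd).returncode == 0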
Example #13
def Comic(archive_path, user=None, page=0):
    """Return an oc with all pages in archive_path. if page > 0 return pages [page - Prefs['resume_length']:]"""
    oc = ObjectContainer(title2=unicode(os.path.basename(archive_path)), no_cache=True)
    try:
        archive = archives.get_archive(archive_path)
    except archives.ArchiveError as e:
        Log.Error(e)
        return error_message('bad archive', 'unable to open archive: {}'.format(archive_path))
    for f in utils.sorted_nicely(archive.namelist()):
        page_title, ext = utils.splitext(f)
        if not ext or ext.lower() not in utils.IMAGE_FORMATS:
            continue
        decoration = None
        if page > 0:
            m = utils.PAGE_NUM_REGEX.search(f)
            if m:
                page_num = int(m.group(1))
                if page_num < page - int(Prefs['resume_length']):
                    continue
                if page_num <= page:
                    decoration = '>'
        page_title = utils.basename(page_title)
        if decoration is not None:
            page_title = '{} {}'.format(decoration, page_title)

        if type(page_title) != unicode:
            try:
                page_title = page_title.decode('cp437')
            except Exception:
                try:
                    page_title = unicode(page_title, errors='replace')
                except Exception:
                    pass

        oc.add(CreatePhotoObject(
            media_key=Callback(GetImage, archive_path=String.Encode(archive_path),
                               filename=String.Encode(f), user=user, extension=ext.lstrip('.'),
                               time=int(time.time()) if bool(Prefs['prevent_caching']) else 0),
            rating_key=hashlib.sha1('{}{}{}'.format(archive_path, f, user)).hexdigest(),
            title=page_title,
            thumb=utils.thumb_transcode(Callback(get_thumb, archive_path=archive_path,
                                                 filename=f))))
    return oc
Example #14
def Comic(archive_path, user=None, page=0):
    """Return an oc with all pages in archive_path. if page > 0 return pages [page - Prefs['resume_length']:]"""
    oc = ObjectContainer(title2=unicode(os.path.basename(archive_path)), no_cache=True)
    try:
        archive = archives.get_archive(archive_path)
    except archives.ArchiveError as e:
        Log.Error(e)
        return error_message('bad archive', 'unable to open archive: {}'.format(archive_path))
    for f in utils.sorted_nicely(archive.namelist()):
        page_title, ext = utils.splitext(f)
        if not ext or ext not in utils.IMAGE_FORMATS:
            continue
        decoration = None
        if page > 0:
            m = utils.PAGE_NUM_REGEX.search(f)
            if m:
                page_num = int(m.group(1))
                if page_num < page - int(Prefs['resume_length']):
                    continue
                if page_num <= page:
                    decoration = '>'
        page_title = utils.basename(page_title)
        if decoration is not None:
            page_title = '{} {}'.format(decoration, page_title)

        if type(page_title) != unicode:
            try:
                page_title = page_title.decode('cp437')
            except Exception:
                try:
                    page_title = unicode(page_title, errors='replace')
                except Exception:
                    pass

        oc.add(CreatePhotoObject(
            media_key=Callback(GetImage, archive_path=String.Encode(archive_path),
                               filename=String.Encode(f), user=user, extension=ext.lstrip('.'),
                               time=int(time.time()) if bool(Prefs['prevent_caching']) else 0),
            rating_key=hashlib.sha1('{}{}{}'.format(archive_path, f, user)).hexdigest(),
            title=page_title,
            thumb=utils.thumb_transcode(Callback(get_thumb, archive_path=archive_path,
                                                 filename=f))))
    return oc
Example #15
    def __call__(self):
        options, args = self.parser.parse_args(self.gitify.args[2:])

        package_name = basename()
        svntype = svn_type()

        if svntype == 'tags':
            print "Can't work on tags!"
            sys.exit(1)
        elif svntype == 'unrecognized':
            print "Unrecognized svn structure!"
            sys.exit(1)

        if not exists(config.GIT_CACHE + package_name):
            print "No git repository found in %s." % config.GIT_CACHE
            print "Initiating cloning into cache."
            clone()

        # get the branch svn is on
        remote_branch = svn_branch()
        # the following is just convention:
        local_branch = "local/%s" % remote_branch

        cwd = os.getcwd()
        # perform all index updates in the cache to avoid conflicts
        os.chdir(config.GIT_CACHE + package_name)

        dummy, existing_branches = popen('git branch', False, False)
        existing_branches = [b.strip() for b in existing_branches]
        if local_branch in existing_branches:
            popen('git checkout -f %s' % local_branch, False, False)
        else:
            popen('git checkout -f -b %s %s' % (local_branch, remote_branch),
                False, False)

        os.chdir(cwd)
        if not exists('.git'):
            popen('ln -s %s%s/.git' % (config.GIT_CACHE, package_name), False, False)

        print "Git branch '%s' is now following svn branch '%s':" % (
            local_branch, remote_branch)
        popen('svn status')
        popen('git status')
Example #16
File: core.py Project: starseeker/archival
def parser():
    """Interface to g.parser, intended to be run from the top-level, e.g.:

    ::

        if __name__ == "__main__":
            options, flags = grass.parser()
            main()

    Thereafter, the global variables "options" and "flags" will be
    dictionaries containing option/flag values, keyed by lower-case
    option/flag names. The values in "options" are strings, those in
    "flags" are Python booleans.
    """
    if not os.getenv("GISBASE"):
        print >> sys.stderr, "You must be in GRASS GIS to run this program."
        sys.exit(1)

    cmdline = [basename(sys.argv[0])]
    cmdline += ['"' + arg + '"' for arg in sys.argv[1:]]
    os.environ['CMDLINE'] = ' '.join(cmdline)

    argv = sys.argv[:]
    name = argv[0]
    if not os.path.isabs(name):
        if os.sep in name or (os.altsep and os.altsep in name):
            argv[0] = os.path.abspath(name)
        else:
            argv[0] = os.path.join(sys.path[0], name)

    prog = "g.parser.exe" if sys.platform == "win32" else "g.parser"
    p = subprocess.Popen([prog, '-n'] + argv, stdout=subprocess.PIPE)
    s = p.communicate()[0]
    lines = s.split('\0')

    if not lines or lines[0] != "@ARGS_PARSED@":
        sys.stdout.write(s)
        sys.exit(p.returncode)

    return _parse_opts(lines[1:])
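The _parse_opts helper is not shown on this page; a hedged sketch consistent with the docstring above, assuming g.parser -n emits NUL-separated "opt_<name>=<value>" and "flag_<name>=<0|1>" entries after the @ARGS_PARSED@ marker:

def _parse_opts(lines):
    # Sketch only: build the "options" and "flags" dictionaries the
    # docstring describes from g.parser's key=value output.
    options, flags = {}, {}
    for line in lines:
        if not line:
            continue
        var, val = line.split('=', 1)
        if var.startswith('opt_'):
            options[var[4:]] = val
        elif var.startswith('flag_'):
            flags[var[5:]] = bool(int(val))
    return options, flags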
Example #17
File: core.py Project: caomw/grass
def parser():
    """Interface to g.parser, intended to be run from the top-level, e.g.:

    ::

        if __name__ == "__main__":
            options, flags = grass.parser()
            main()

    Thereafter, the global variables "options" and "flags" will be
    dictionaries containing option/flag values, keyed by lower-case
    option/flag names. The values in "options" are strings, those in
    "flags" are Python booleans.
    """
    if not os.getenv("GISBASE"):
        print >> sys.stderr, "You must be in GRASS GIS to run this program."
        sys.exit(1)

    cmdline = [basename(sys.argv[0])]
    cmdline += ['"' + arg + '"' for arg in sys.argv[1:]]
    os.environ['CMDLINE'] = ' '.join(cmdline)

    argv = sys.argv[:]
    name = argv[0]
    if not os.path.isabs(name):
        if os.sep in name or (os.altsep and os.altsep in name):
            argv[0] = os.path.abspath(name)
        else:
            argv[0] = os.path.join(sys.path[0], name)

    prog = "g.parser.exe" if sys.platform == "win32" else "g.parser"
    p = subprocess.Popen([prog, '-n'] + argv, stdout=subprocess.PIPE)
    s = p.communicate()[0]
    lines = s.split('\0')

    if not lines or lines[0] != "@ARGS_PARSED@":
        sys.stdout.write(s)
        sys.exit(p.returncode)

    return _parse_opts(lines[1:])
Example #18
    def _toFieldSubValue(self, subvalue, current_field_value):
        """
        Converts a subvalue to an `INamedFile`.

        Parameters:
        subvalue -- The value extracted from the request by the widget.
        current_field_value -- The current value of the field on the context.

        Return: an `INamedFile` or `None` if the subvalue cannot be converted.
        """
        if isinstance(subvalue, basestring) and subvalue.startswith('index:'):
            index = int(subvalue.split(':')[1])
            return current_field_value[index]
        elif INamedFile.providedBy(subvalue):
            return subvalue
        else:
            filename = getattr(subvalue, 'filename', None)
            if filename:
                filename = basename(filename)
                return NamedFile(subvalue, filename=filename.decode('utf-8'))

        return None
Example #19
def main():
    a = get_args()

    prev_enc = 0
    def train(i):
        loss = 0
        
        noise = a.noise * torch.randn(1, 1, *params[0].shape[2:4], 1).cuda() if a.noise > 0 else None
        img_out = image_f(noise)

        micro = None if a.in_txt2 is None else False
        imgs_sliced = slice_imgs([img_out], a.samples, a.modsize, norm_in, a.overscan, micro=micro)
        out_enc = model_clip.encode_image(imgs_sliced[-1])
        if a.diverse > 0:
            imgs_sliced = slice_imgs([image_f(noise)], a.samples, a.modsize, norm_in, a.overscan, micro=micro)
            out_enc2 = model_clip.encode_image(imgs_sliced[-1])
            loss += a.diverse * torch.cosine_similarity(out_enc, out_enc2, dim=-1).mean()
            del out_enc2; torch.cuda.empty_cache()
        if a.in_img is not None and os.path.isfile(a.in_img): # input image
            loss +=  sign * torch.cosine_similarity(img_enc, out_enc, dim=-1).mean()
        if a.in_txt is not None: # input text
            loss +=  sign * torch.cosine_similarity(txt_enc, out_enc, dim=-1).mean()
        if a.in_txt0 is not None: # subtract text
            loss += -sign * torch.cosine_similarity(txt_enc0, out_enc, dim=-1).mean()
        if a.sync > 0 and a.in_img is not None and os.path.isfile(a.in_img): # image composition
            loss *= 1. + a.sync * (a.steps/(i+1) * ssim_loss(img_out, img_in) - 1)
        if a.in_txt2 is not None: # input text for micro details
            imgs_sliced = slice_imgs([img_out], a.samples, a.modsize, norm_in, a.overscan, micro=True)
            out_enc2 = model_clip.encode_image(imgs_sliced[-1])
            loss +=  sign * torch.cosine_similarity(txt_enc2, out_enc2, dim=-1).mean()
            del out_enc2; torch.cuda.empty_cache()
        if a.expand > 0:
            global prev_enc
            if i > 0:
                loss += a.expand * torch.cosine_similarity(out_enc, prev_enc, dim=-1).mean()
            prev_enc = out_enc.detach()

        del img_out, imgs_sliced, out_enc; torch.cuda.empty_cache()
        assert not isinstance(loss, int), ' Loss not defined, check the inputs'
        
        if a.prog is True:
            lr_cur = lr0 + (i / a.steps) * (lr1 - lr0)
            for g in optimizer.param_groups: 
                g['lr'] = lr_cur
    
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if i % a.fstep == 0:
            with torch.no_grad():
                img = image_f(contrast=a.contrast).cpu().numpy()[0]
            checkout(img, os.path.join(tempdir, '%04d.jpg' % (i // a.fstep)), verbose=a.verbose)
            pbar.upd()

    # Load CLIP models
    model_clip, _ = clip.load(a.model)
    if a.verbose is True: print(' using model', a.model)
    xmem = {'RN50':0.5, 'RN50x4':0.16, 'RN101':0.33}
    if 'RN' in a.model:
        a.samples = int(a.samples * xmem[a.model])
            
    if a.diverse > 0:
        a.samples = int(a.samples * 0.5)
            
    norm_in = torchvision.transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))

    out_name = []
    if a.in_img is not None and os.path.isfile(a.in_img):
        if a.verbose is True: print(' ref image:', basename(a.in_img))
        img_in = torch.from_numpy(img_read(a.in_img)/255.).unsqueeze(0).permute(0,3,1,2).cuda()
        img_in = img_in[:,:3,:,:] # fix rgb channels
        in_sliced = slice_imgs([img_in], a.samples, a.modsize, transform=norm_in, overscan=a.overscan)[0]
        img_enc = model_clip.encode_image(in_sliced).detach().clone()
        if a.sync > 0:
            ssim_loss = ssim.SSIM(window_size = 11)
            img_in = F.interpolate(img_in, a.size).float()
        else:
            del img_in
        del in_sliced; torch.cuda.empty_cache()
        out_name.append(basename(a.in_img).replace(' ', '_'))

    if a.in_txt is not None:
        if a.verbose is True: print(' ref text: ', basename(a.in_txt))
        if a.translate:
            translator = Translator()
            a.in_txt = translator.translate(a.in_txt, dest='en').text
            if a.verbose is True: print(' translated to:', a.in_txt) 
        tx = clip.tokenize(a.in_txt).cuda()
        txt_enc = model_clip.encode_text(tx).detach().clone()
        out_name.append(txt_clean(a.in_txt))

    if a.in_txt2 is not None:
        if a.verbose is True: print(' micro text:', basename(a.in_txt2))
        a.samples = int(a.samples * 0.75)
        if a.translate:
            translator = Translator()
            a.in_txt2 = translator.translate(a.in_txt2, dest='en').text
            if a.verbose is True: print(' translated to:', a.in_txt2) 
        tx2 = clip.tokenize(a.in_txt2).cuda()
        txt_enc2 = model_clip.encode_text(tx2).detach().clone()
        out_name.append(txt_clean(a.in_txt2))

    if a.in_txt0 is not None:
        if a.verbose is True: print(' subtract text:', basename(a.in_txt0))
        a.samples = int(a.samples * 0.75)
        if a.translate:
            translator = Translator()
            a.in_txt0 = translator.translate(a.in_txt0, dest='en').text
            if a.verbose is True: print(' translated to:', a.in_txt0) 
        tx0 = clip.tokenize(a.in_txt0).cuda()
        txt_enc0 = model_clip.encode_text(tx0).detach().clone()
        out_name.append('off-' + txt_clean(a.in_txt0))

    params, image_f = fft_image([1, 3, *a.size], resume=a.resume)
    image_f = to_valid_rgb(image_f)

    if a.prog is True:
        lr1 = a.lrate * 2
        lr0 = lr1 * 0.01
    else:
        lr0 = a.lrate
    optimizer = torch.optim.Adam(params, lr0)
    sign = 1. if a.invert is True else -1.

    if a.verbose is True: print(' samples:', a.samples)
    out_name = '-'.join(out_name)
    out_name += '-%s' % a.model if 'RN' in a.model.upper() else ''
    tempdir = os.path.join(a.out_dir, out_name)
    os.makedirs(tempdir, exist_ok=True)

    pbar = ProgressBar(a.steps // a.fstep)
    for i in range(a.steps):
        train(i)

    os.system('ffmpeg -v warning -y -i %s\%%04d.jpg "%s.mp4"' % (tempdir, os.path.join(a.out_dir, out_name)))
    shutil.copy(img_list(tempdir)[-1], os.path.join(a.out_dir, '%s-%d.jpg' % (out_name, a.steps)))
    if a.save_pt is True:
        torch.save(params, '%s.pt' % os.path.join(a.out_dir, out_name))
Example #20
def main():
    a = get_args()

    prev_enc = 0

    def train(i):
        loss = 0

        noise = a.noise * torch.randn(1, 1, *params[0].shape[2:4],
                                      1).cuda() if a.noise > 0 else None
        img_out = image_f(noise)

        if a.sharp != 0:
            lx = torch.mean(
                torch.abs(img_out[0, :, :, 1:] - img_out[0, :, :, :-1]))
            ly = torch.mean(
                torch.abs(img_out[0, :, 1:, :] - img_out[0, :, :-1, :]))
            loss -= a.sharp * (ly + lx)

        micro = 1 - a.macro if a.in_txt2 is None else False
        imgs_sliced = slice_imgs([img_out],
                                 a.samples,
                                 a.modsize,
                                 trform_f,
                                 a.align,
                                 micro=micro)
        out_enc = model_clip.encode_image(imgs_sliced[-1])
        if a.diverse != 0:
            imgs_sliced = slice_imgs([image_f(noise)],
                                     a.samples,
                                     a.modsize,
                                     trform_f,
                                     a.align,
                                     micro=micro)
            out_enc2 = model_clip.encode_image(imgs_sliced[-1])
            loss += a.diverse * torch.cosine_similarity(
                out_enc, out_enc2, dim=-1).mean()
            del out_enc2
            torch.cuda.empty_cache()
        if a.in_img is not None and os.path.isfile(a.in_img):  # input image
            loss += sign * 0.5 * torch.cosine_similarity(
                img_enc, out_enc, dim=-1).mean()
        if a.in_txt is not None:  # input text
            loss += sign * torch.cosine_similarity(txt_enc, out_enc,
                                                   dim=-1).mean()
            if a.notext > 0:
                loss -= sign * a.notext * torch.cosine_similarity(
                    txt_plot_enc, out_enc, dim=-1).mean()
        if a.in_txt0 is not None:  # subtract text
            loss += -sign * torch.cosine_similarity(txt_enc0, out_enc,
                                                    dim=-1).mean()
        if a.sync > 0 and a.in_img is not None and os.path.isfile(
                a.in_img):  # image composition
            prog_sync = (a.steps // a.fstep - i) / (a.steps // a.fstep)
            loss += prog_sync * a.sync * sim_loss(F.interpolate(
                img_out, sim_size).float(),
                                                  img_in,
                                                  normalize=True).squeeze()
        if a.in_txt2 is not None:  # input text for micro details
            imgs_sliced = slice_imgs([img_out],
                                     a.samples,
                                     a.modsize,
                                     trform_f,
                                     a.align,
                                     micro=True)
            out_enc2 = model_clip.encode_image(imgs_sliced[-1])
            loss += sign * torch.cosine_similarity(txt_enc2, out_enc2,
                                                   dim=-1).mean()
            del out_enc2
            torch.cuda.empty_cache()
        if a.expand > 0:
            global prev_enc
            if i > 0:
                loss += a.expand * torch.cosine_similarity(
                    out_enc, prev_enc, dim=-1).mean()
            prev_enc = out_enc.detach()

        del img_out, imgs_sliced, out_enc
        torch.cuda.empty_cache()
        assert not isinstance(loss, int), ' Loss not defined, check the inputs'

        if a.prog is True:
            lr_cur = lr0 + (i / a.steps) * (lr1 - lr0)
            for g in optimizer.param_groups:
                g['lr'] = lr_cur

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if i % a.fstep == 0:
            with torch.no_grad():
                img = image_f(contrast=a.contrast).cpu().numpy()[0]
            if (a.sync > 0 and a.in_img is not None) or a.sharp != 0:
                img = img**1.3  # empirical tone mapping
            checkout(img,
                     os.path.join(tempdir, '%04d.jpg' % (i // a.fstep)),
                     verbose=a.verbose)
            pbar.upd()

    # Load CLIP models
    model_clip, _ = clip.load(a.model)
    if a.verbose is True: print(' using model', a.model)
    xmem = {'RN50': 0.5, 'RN50x4': 0.16, 'RN101': 0.33}
    if 'RN' in a.model:
        a.samples = int(a.samples * xmem[a.model])

    if a.multilang is True:
        model_lang = SentenceTransformer(
            'clip-ViT-B-32-multilingual-v1').cuda()

    def enc_text(txt):
        if a.multilang is True:
            emb = model_lang.encode([txt],
                                    convert_to_tensor=True,
                                    show_progress_bar=False)
        else:
            emb = model_clip.encode_text(clip.tokenize(txt).cuda())
        return emb.detach().clone()

    if a.diverse != 0:
        a.samples = int(a.samples * 0.5)
    if a.sync > 0:
        a.samples = int(a.samples * 0.5)

    if a.transform is True:
        trform_f = transforms.transforms_custom
        a.samples = int(a.samples * 0.95)
    else:
        trform_f = transforms.normalize()

    out_name = []
    if a.in_txt is not None:
        if a.verbose is True: print(' ref text: ', basename(a.in_txt))
        if a.translate:
            translator = Translator()
            a.in_txt = translator.translate(a.in_txt, dest='en').text
            if a.verbose is True: print(' translated to:', a.in_txt)
        txt_enc = enc_text(a.in_txt)
        out_name.append(txt_clean(a.in_txt))

        if a.notext > 0:
            txt_plot = torch.from_numpy(plot_text(a.in_txt, a.modsize) /
                                        255.).unsqueeze(0).permute(0, 3, 1,
                                                                   2).cuda()
            txt_plot_enc = model_clip.encode_image(txt_plot).detach().clone()

    if a.in_txt2 is not None:
        if a.verbose is True: print(' micro text:', basename(a.in_txt2))
        a.samples = int(a.samples * 0.75)
        if a.translate:
            translator = Translator()
            a.in_txt2 = translator.translate(a.in_txt2, dest='en').text
            if a.verbose is True: print(' translated to:', a.in_txt2)
        txt_enc2 = enc_text(a.in_txt2)
        out_name.append(txt_clean(a.in_txt2))

    if a.in_txt0 is not None:
        if a.verbose is True: print(' subtract text:', basename(a.in_txt0))
        a.samples = int(a.samples * 0.75)
        if a.translate:
            translator = Translator()
            a.in_txt0 = translator.translate(a.in_txt0, dest='en').text
            if a.verbose is True: print(' translated to:', a.in_txt0)
        txt_enc0 = enc_text(a.in_txt0)
        out_name.append('off-' + txt_clean(a.in_txt0))

    if a.multilang is True: del model_lang

    if a.in_img is not None and os.path.isfile(a.in_img):
        if a.verbose is True: print(' ref image:', basename(a.in_img))
        img_in = torch.from_numpy(
            img_read(a.in_img) / 255.).unsqueeze(0).permute(0, 3, 1, 2).cuda()
        img_in = img_in[:, :3, :, :]  # fix rgb channels
        in_sliced = slice_imgs([img_in],
                               a.samples,
                               a.modsize,
                               transforms.normalize(),
                               a.align,
                               micro=False)[0]
        img_enc = model_clip.encode_image(in_sliced).detach().clone()
        if a.sync > 0:
            sim_loss = lpips.LPIPS(net='vgg', verbose=False).cuda()
            sim_size = [s // 2 for s in a.size]
            img_in = F.interpolate(img_in, sim_size).float()
        else:
            del img_in
        del in_sliced
        torch.cuda.empty_cache()
        out_name.append(basename(a.in_img).replace(' ', '_'))

    params, image_f = fft_image([1, 3, *a.size],
                                resume=a.resume,
                                decay_power=a.decay)
    image_f = to_valid_rgb(image_f, colors=a.colors)

    if a.prog is True:
        lr1 = a.lrate * 2
        lr0 = lr1 * 0.01
    else:
        lr0 = a.lrate
    optimizer = torch.optim.Adam(params, lr0)
    sign = 1. if a.invert is True else -1.

    if a.verbose is True: print(' samples:', a.samples)
    out_name = '-'.join(out_name)
    out_name += '-%s' % a.model if 'RN' in a.model.upper() else ''
    tempdir = os.path.join(a.out_dir, out_name)
    os.makedirs(tempdir, exist_ok=True)

    pbar = ProgressBar(a.steps // a.fstep)
    for i in range(a.steps):
        train(i)

    os.system('ffmpeg -v warning -y -i %s\%%04d.jpg "%s.mp4"' %
              (tempdir, os.path.join(a.out_dir, out_name)))
    shutil.copy(
        img_list(tempdir)[-1],
        os.path.join(a.out_dir, '%s-%d.jpg' % (out_name, a.steps)))
    if a.save_pt is True:
        torch.save(params, '%s.pt' % os.path.join(a.out_dir, out_name))
Example #21
    def mkTempPath( self, filename ):
        return self._tempDir + self.pathSep() + utils.basename( filename )
Example #22
File: audio.py Project: inkerra/python-vk
def get_id_name(audio):
    return utils.basename(audio.find("url").text)
Example #23
    def infer(self, args):
        # print('infer() :', args)

        if 'image_features' not in args:
            args['image_features'] = None

        # Image preprocessing
        transform = transforms.Compose([
            transforms.Resize((args['resize'], args['resize'])),
            transforms.ToTensor(),
            transforms.Normalize((0.485, 0.456, 0.406),
                                 (0.229, 0.224, 0.225))])

        # Get dataset parameters:
        dataset_configs = DatasetParams(args['dataset_config_file'])
        dataset_params = dataset_configs.get_params(args['dataset'],
                                                    args['image_dir'],
                                                    args['image_files'],
                                                    args['image_features'])

        if self.params.has_external_features() and \
           any(dc.name == 'generic' for dc in dataset_params):
            print('WARNING: you cannot use external features without specifying all datasets in '
                  'datasets.conf.')
            print('Hint: take a look at datasets/datasets.conf.default.')

        # Build data loader
        print("Loading dataset: {}".format(args['dataset']))

        # Update dataset params with needed model params:
        for i in dataset_params:
            i.config_dict['skip_start_token'] = self.params.skip_start_token
            # For visualizing attention we need file names instead of IDs in our output:
            if args['store_image_paths']:
                i.config_dict['return_image_file_name'] = True

        ext_feature_sets = [self.params.features.external, self.params.persist_features.external]
        if args['dataset'] == 'incore':
            ext_feature_sets = None

        # We ask it to iterate over images instead of all (image, caption) pairs
        data_loader, ef_dims = get_loader(dataset_params, vocab=None, transform=transform,
                                          batch_size=args['batch_size'], shuffle=False,
                                          num_workers=args['num_workers'],
                                          ext_feature_sets=ext_feature_sets,
                                          skip_images=not self.params.has_internal_features(),
                                          iter_over_images=True)

        self.data_loader = data_loader
        # Create model directory
        if not os.path.exists(args['results_path']):
            os.makedirs(args['results_path'])

        scorers = {}
        if args['scoring'] is not None:
            for s in args['scoring'].split(','):
                s = s.lower().strip()
                if s == 'cider':
                    from eval.cider import Cider
                    scorers['CIDEr'] = Cider(df='corpus')

        # Store captions here:
        output_data = []

        gts = {}
        res = {}

        print('Starting inference, max sentence length: {} num_workers: {}'.\
              format(args['max_seq_length'], args['num_workers']))
        show_progress = sys.stderr.isatty() and not args['verbose'] \
                        and ext_feature_sets is not None

        for i, (images, ref_captions, lengths, image_ids,
                features) in enumerate(tqdm(self.data_loader, disable=not show_progress)):

            if len(scorers) > 0:
                for j in range(len(ref_captions)):
                    jid = image_ids[j]
                    if jid not in gts:
                        gts[jid] = []
                    rcs = ref_captions[j]
                    if type(rcs) is str:
                        rcs = [rcs]
                    for rc in rcs:
                        gts[jid].append(rc.lower())

            images = images.to(device)

            init_features    = features[0].to(device) if len(features) > 0 and \
                               features[0] is not None else None
            persist_features = features[1].to(device) if len(features) > 1 and \
                               features[1] is not None else None

            # Generate a caption from the image
            sampled_batch = self.model.sample(images, init_features, persist_features,
                                              max_seq_length=args['max_seq_length'],
                                              start_token_id=self.vocab('<start>'),
                                              end_token_id=self.vocab('<end>'),
                                              alternatives=args['alternatives'],
                                              probabilities=args['probabilities'])

            sampled_ids_batch = sampled_batch

            for i in range(len(sampled_ids_batch)):
                sampled_ids = sampled_ids_batch[i]

                # Convert word_ids to words
                if self.params.hierarchical_model:
                    # assert False, 'paragraph_ids_to_words() need to be updated'
                    caption = paragraph_ids_to_words(sampled_ids, self.vocab,
                                                     skip_start_token=True)
                else:
                    caption = caption_ids_ext_to_words(sampled_ids, self.vocab,
                                                       skip_start_token=True,
                                                       capitalize=not args['no_capitalize'])
                if args['no_repeat_sentences']:
                    caption = remove_duplicate_sentences(caption)

                if args['only_complete_sentences']:
                    caption = remove_incomplete_sentences(caption)

                if args['verbose']:
                    print('=>', caption)

                if True:
                    caption = self.apply_lemma_pos_rules(caption)
                    if args['verbose']:
                        print('#>', caption)
                    
                output_data.append({'image_id': image_ids[i],
                                    'caption': caption})
                res[image_ids[i]] = [caption.lower()]

        for score_name, scorer in scorers.items():
            score = scorer.compute_score(gts, res)[0]
            print('Test', score_name, score)

        # Decide output format, fall back to txt
        if args['output_format'] is not None:
            output_format = args['output_format']
        elif args['output_file'] and args['output_file'].endswith('.json'):
            output_format = 'json'
        else:
            output_format = 'txt'

        # Create a sensible default output path for results:
        output_file = None
        if not args['output_file'] and not args['print_results']:
            model_name_path = Path(args['model'])
            is_in_same_folder = len(model_name_path.parents) == 1
            if not is_in_same_folder:
                model_name = args['model'].split(os.sep)[-2]
                model_epoch = basename(args['model'])
                output_file = '{}-{}.{}'.format(model_name, model_epoch,
                                                output_format)
            else:
                output_file = model_name_path.stem + '.' + output_format
        else:
            output_file = args['output_file']

        if output_file:
            output_path = os.path.join(args['results_path'], output_file)
            if output_format == 'json':
                json.dump(output_data, open(output_path, 'w'))
            else:
                with open(output_path, 'w') as fp:
                    for data in output_data:
                        print(data['image_id'], data['caption'], file=fp)

            print('Wrote generated captions to {} as {}'.
                  format(output_path, args['output_format']))

        if args['print_results']:
            for d in output_data:
                print('{}: {}'.format(d['image_id'], d['caption']))

        return output_data
Example #24
    def mkTempPath(self, filename):
        return self._tempDir + self.pathSep() + utils.basename(filename)
Example #25
def main():
    a = get_args()

    # Load CLIP models
    model_clip, _ = clip.load(a.model)
    if a.verbose is True: print(' using model', a.model)
    xmem = {'RN50': 0.5, 'RN50x4': 0.16, 'RN101': 0.33}
    if 'RN' in a.model:
        a.samples = int(a.samples * xmem[a.model])
    workdir = os.path.join(a.out_dir, basename(a.in_txt))
    workdir += '-%s' % a.model if 'RN' in a.model.upper() else ''
    os.makedirs(workdir, exist_ok=True)

    norm_in = torchvision.transforms.Normalize(
        (0.48145466, 0.4578275, 0.40821073),
        (0.26862954, 0.26130258, 0.27577711))

    if a.in_txt0 is not None:
        if a.verbose is True: print(' subtract text:', basename(a.in_txt0))
        if a.translate:
            translator = Translator()
            a.in_txt0 = translator.translate(a.in_txt0, dest='en').text
            if a.verbose is True: print(' translated to:', a.in_txt0)
        tx0 = clip.tokenize(a.in_txt0).cuda()
        txt_enc0 = model_clip.encode_text(tx0).detach().clone()

    # make init
    global params_start
    params_shape = [1, 3, a.size[0], a.size[1] // 2 + 1, 2]
    params_start = torch.randn(*params_shape).cuda()  # random init

    if a.resume is not None and os.path.isfile(a.resume):
        if a.verbose is True: print(' resuming from', a.resume)
        params, _ = fft_image([1, 3, *a.size], resume=a.resume)
        params_start = ema(params_start, params[0].detach(), 1)
    else:
        a.resume = 'init.pt'

    shutil.copy(a.resume,
                os.path.join(workdir, '000-%s.pt' % basename(a.resume)))
    torch.save(params_start, 'init.pt')  # final init

    def process(txt, num):

        global params_start
        params, image_f = fft_image([1, 3, *a.size], resume='init.pt')
        image_f = to_valid_rgb(image_f)
        optimizer = torch.optim.Adam(params, a.lrate)

        if a.verbose is True: print(' ref text: ', txt)
        if a.translate:
            translator = Translator()
            txt = translator.translate(txt, dest='en').text
            if a.verbose is True: print(' translated to:', txt)
        tx = clip.tokenize(txt).cuda()
        txt_enc = model_clip.encode_text(tx).detach().clone()

        out_name = '%03d-%s' % (num + 1, txt_clean(txt))
        out_name += '-%s' % a.model if 'RN' in a.model.upper() else ''
        tempdir = os.path.join(workdir, out_name)
        os.makedirs(tempdir, exist_ok=True)

        pbar = ProgressBar(a.steps // a.fstep)
        for i in range(a.steps):
            loss = 0

            noise = a.noise * torch.randn(1, 1, *params[0].shape[2:4],
                                          1).cuda() if a.noise > 0 else None
            img_out = image_f(noise)

            imgs_sliced = slice_imgs([img_out],
                                     a.samples,
                                     a.modsize,
                                     norm_in,
                                     a.overscan,
                                     micro=None)
            out_enc = model_clip.encode_image(imgs_sliced[-1])
            loss -= torch.cosine_similarity(txt_enc, out_enc, dim=-1).mean()
            if a.in_txt0 is not None:  # subtract text
                loss += torch.cosine_similarity(txt_enc0, out_enc,
                                                dim=-1).mean()
            del img_out, imgs_sliced, out_enc
            torch.cuda.empty_cache()

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if i % a.fstep == 0:
                with torch.no_grad():
                    img = image_f(contrast=a.contrast).cpu().numpy()[0]
                checkout(img,
                         os.path.join(tempdir, '%04d.jpg' % (i // a.fstep)),
                         verbose=a.verbose)
                pbar.upd()
                del img

        if a.keep == 'all':
            params_start = ema(params_start, params[0].detach(), num + 1)
            torch.save(params_start, 'init.pt')
        elif a.keep == 'last':
            torch.save((params_start + params[0].detach()) / 2, 'init.pt')

        torch.save(params[0], '%s.pt' % os.path.join(workdir, out_name))
        shutil.copy(
            img_list(tempdir)[-1],
            os.path.join(workdir, '%s-%d.jpg' % (out_name, a.steps)))
        os.system('ffmpeg -v warning -y -i %s\%%04d.jpg "%s.mp4"' %
                  (tempdir, os.path.join(workdir, out_name)))

    with open(a.in_txt, 'r', encoding="utf-8") as f:
        texts = f.readlines()
        texts = [
            tt.strip() for tt in texts if len(tt.strip()) > 0 and tt[0] != '#'
        ]
    if a.verbose is True:
        print(' total lines:', len(texts))
        print(' samples:', a.samples)

    for i, txt in enumerate(texts):
        process(txt, i)

    vsteps = int(a.length * 25 / len(texts))  # 25 fps
    tempdir = os.path.join(workdir, '_final')
    os.makedirs(tempdir, exist_ok=True)

    def read_pt(file):
        return torch.load(file).cuda()

    if a.verbose is True: print(' rendering complete piece')
    ptfiles = file_list(workdir, 'pt')
    pbar = ProgressBar(vsteps * len(ptfiles))
    for px in range(len(ptfiles)):
        params1 = read_pt(ptfiles[px])
        params2 = read_pt(ptfiles[(px + 1) % len(ptfiles)])

        params, image_f = fft_image([1, 3, *a.size], resume=params1)
        image_f = to_valid_rgb(image_f)

        for i in range(vsteps):
            with torch.no_grad():
                img = image_f(
                    (params2 - params1) *
                    math.sin(1.5708 * i / vsteps)**2)[0].permute(1, 2, 0)
                img = torch.clip(img * 255, 0,
                                 255).cpu().numpy().astype(np.uint8)
            imsave(os.path.join(tempdir, '%05d.jpg' % (px * vsteps + i)), img)
            if a.verbose is True: cvshow(img)
            pbar.upd()

    os.system('ffmpeg -v warning -y -i %s\%%05d.jpg "%s.mp4"' %
              (tempdir, os.path.join(a.out_dir, basename(a.in_txt))))
    if a.keep is True: os.remove('init.pt')
Example #26
    def __call__(self):
        options, args = self.parser.parse_args(self.gitify.args[2:])

        if not is_svn():
            print "This only works on svn checkouts!"
            sys.exit(1)

        package_name = basename()
        svntype = svn_type()

        if svntype == 'tags':
            print "Can't work on tags!"
            sys.exit(1)
        elif svntype == 'unrecognized':
            print "Unrecognized svn structure!"
            sys.exit(1)

        if not exists(config.GIT_CACHE + package_name):
            print "No git repository found in %s." % config.GIT_CACHE
            print "Initiating cloning into cache."
            clone()
        else:
            # if we already have a cached copy, make sure it's up-to-date:
            print "Updating existing cache:"
            gitify(args=['fetch', package_name])

        # get the branch svn is on
        remote_branch = svn_branch()
        # the following is just convention:
        local_branch = "local/%s" % remote_branch

        cwd = os.getcwd()
        # perform all index updates in the cache to avoid conflicts
        os.chdir(config.GIT_CACHE + package_name)

        dummy, existing_branches = popen('git branch', False, False)
        existing_branches = [b.strip('* ') for b in existing_branches]
        if local_branch in existing_branches:
            popen('git checkout -f %s' % local_branch, False, False)
        else:
            popen('git checkout -f -b %s %s' % (local_branch, remote_branch),
                  False, False)

        os.chdir(cwd)
        if not exists('.git'):
            popen('cp -Rp %s%s/.git .' % (config.GIT_CACHE, package_name),
                  False, False)

        # if the working copy is on another branch, switch:
        if local_branch != git_branch():
            if local_branch in existing_branches:
                popen('git checkout -f %s' % local_branch)
            else:
                popen('git checkout -b %s' % local_branch)

        assert git_branch() == local_branch, (
            "Changing branches failed, is on %r but should be on %r" %
            (git_branch(), local_branch))
        print "Git branch '%s' is now following svn branch '%s':" % (
            local_branch, remote_branch)
        popen('svn status')
        popen('git status')
Example #27
# Imports reconstructed for this excerpt: FileVideoStream and FPS come
# from imutils, and basename is assumed to be the project's utils helper.
import argparse
import time

import cv2
from imutils.video import FileVideoStream, FPS

from utils import basename

import db_helper

# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", required=True, help="path to input video")
ap.add_argument("-s",
                "--show",
                action="store_true",
                help="show video with results")
args = vars(ap.parse_args())

faces = []
faces_count = []
data = []
tolerance = 0.55
name = basename(args["video"])

print("[INFO] Video name: " + name)
print("[INFO] Starting video processing...")
fvs = FileVideoStream(args["video"]).start()
time.sleep(1.0)

total_frames = int(fvs.stream.get(cv2.CAP_PROP_FRAME_COUNT))
frames_with_face = 0
frame_position = 0

fps = FPS().start()

while fvs.more():
    indexes = []
    frame_position += 1
Example #28
File: runTE.py Project: mili7522/N-back
def run(i,
        data_path,
        extension,
        save_folder,
        raw_save_root="/scratch/InfoDynFuncStruct/Mike/N-back/",
        save_every=20,
        GRP=False,
        compute_p=True,
        compress=False,
        set_k_to_0=False,
        calc_type='ksg',
        use_source_embedding=False,
        **preprocessing_params):
    """
    Run TE calculation for a particular subject. Parameters are loaded from file, based on the AIS calculation, or set
    to 0 if set_k_to_0 is True
    
    Arguments:
        i -- An Int which states which file or subject to load and process
        data_path -- Location of the data files
        extension -- File extension of the data (eg. .csv, .tsv, .mat)
        save_folder -- Subfolder of the 'Results' directory in which to save the local AIS values, parameters and p_values
        raw_save_root -- Location to save the raw local TE values (as a npz or npy file)
        save_every -- None, or Int giving the number of regions to calculate before saving the current state of the results
        GRP -- Set to True if processing the GRP data, which is one array of dimension (region, timepoints, subject)
        compute_p -- If True, computes the p value of the returned AIS
        compress -- If True, the raw TE values are saved as a compressed npz file instead of an npy file
        set_k_to_0 -- If True, skip loading of k and l parameters, instead initialising the DataFrame to zeros
        calc_type -- The type of estimator to use for the JIDT calculator - 'gaussian' or 'ksg'
        use_source_embedding -- If True, load up the values for history length and delay of the source from the AIS
                                calculations. Otherwise the source_history_length and source_delay parameters are set to 1
        preprocessing_params -- Parameters passed to utils.preprocess for preprocessing the time series data.
                                Includes sampling_rate / sampling_interval, mean_processing_type, trim_start, trim_end,
                                fcutlow, fcuthigh, use_filtfilt
    """
    start_time = time.time()
    files = utils.getAllFiles(data_path, extension)
    if GRP:
        file = files[0]
        filename = '{:02}'.format(i)  # Save the results by the subjects number
        subject_id = i
    else:
        file = files[i]
        filename = utils.basename(file)
        subject_id = None

    print("Processing file {}: {}".format(i, filename))
    # Check for the presence of the current_idx file
    # If it's not present, then either no calculations have been done, or the final results have already been saved
    if os.path.isfile(
            os.path.join(
                raw_save_root, "Results/{}/TE/raw/{}_current_idx.txt".format(
                    save_folder, filename))):
        # Load previous results, which are always saved in the uncompressed format
        results = np.load(
            os.path.join(
                raw_save_root,
                "Results/{}/TE/raw/{}.npy".format(save_folder, filename)))
        p_values = np.load(
            os.path.join(
                raw_save_root, "Results/{}/TE/raw/p_values/{}_p.npy".format(
                    save_folder, filename)))
        dce = np.load(
            os.path.join(
                raw_save_root, "Results/{}/TE/raw/dce/{}_dce.npy".format(
                    save_folder, filename)))
        with open(
                os.path.join(
                    raw_save_root,
                    "Results/{}/TE/raw/{}_current_idx.txt".format(
                        save_folder, filename)), 'r') as f:
            idx_values = f.readline()
        idx_values = list(map(int, idx_values.split(',')))
        print("Loading previous results")
        print("Starting from index", idx_values)
    else:
        results, p_values, dce, idx_values = None, None, None, None
        # Check both compressed and uncompressed options. If this file exists but the current_idx file doesn't then the
        # final results have already been saved. Exit to avoid running again
        if glob.glob(
                os.path.join(
                    raw_save_root,
                    "Results/{}/TE/raw/p_values/{}_p.np*".format(
                        save_folder, filename))):
            print("Result already present")
            exit()

    # Load parameter file
    param_file = 'Results/{}/AIS/params/{}_params.csv'.format(
        save_folder, filename)
    if set_k_to_0:
        df = utils.loadData(file, get_params=False, subject_id=subject_id)
        param_df = pd.DataFrame(np.zeros((len(df), 2), dtype=int),
                                columns=['k', 'tau'])
    else:
        df, param_df = utils.loadData(file,
                                      get_params=True,
                                      param_file=param_file,
                                      subject_id=subject_id)

    data = utils.preprocess(df, **preprocessing_params)
    saver = TEResultSaver(filename, save_folder, raw_save_root)
    calc = startCalc(calc_type)

    # Do the calculations
    results, p_values, dce = getLocalsForAllRegionPairs(
        data,
        param_df,
        calc,
        compute_p,
        saver=saver,
        save_every=save_every,
        results=results,
        p_values=p_values,
        dce=dce,
        idx_values=idx_values,
        use_source_embedding=use_source_embedding)

    # Save the final results
    # Add back the trimmed sections at the start and end of the timeseries by padding with zeros
    padding = ((0, 0), (preprocessing_params.get('trim_start', 0),
                        preprocessing_params.get('trim_end', 0)))
    saver.save_final_result(results,
                            p_values,
                            dce,
                            padding=padding,
                            compress=compress)
    print("\nTime taken:", round((time.time() - start_time) / 60, 2), 'min')
    # append the result to an array
    result = dict(timestamp=timestamp, fm=fm, quality=quality)
    data.append(result)

    # show the frame and the result
    if args["show"]:
        cv2.putText(frame, "{:.2f}".format(fm), (10, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 3)
        cv2.imshow("Video", frame)

    # let any display window refresh and update the FPS counter
    cv2.waitKey(1)
    fps.update()

# save results to db
name = utils.basename(args["video"])
avg = fmSum / total_frames
document = {"name": name, "avg": avg, "data": data}

db_helper.insert("blurDetection", document)
db_helper.printDocumentByName("blurDetection", name)

# stop the timer and display FPS information
fps.stop()
print("[INFO] Avg. variance of Laplacian: {:.2f}".format(avg))
print("[INFO] Elasped time: {:.2f}".format(fps.elapsed()))
print("[INFO] Approx. FPS: {:.2f}".format(fps.fps()))

# do a bit of cleanup
cv2.destroyAllWindows()
fvs.stop()
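db_helper is imported at the top of this script but not shown in the listing. A minimal stand-in that supports the two calls used above, assuming a MongoDB backend via pymongo, might look like this (the connection URI and database name are placeholders):

# db_helper.py -- hypothetical minimal stand-in
from pymongo import MongoClient

_db = MongoClient("mongodb://localhost:27017")["videoAnalysis"]  # placeholder URI/db

def insert(collection, document):
    # store one result document in the named collection
    _db[collection].insert_one(document)

def printDocumentByName(collection, name):
    # fetch and print the first document whose "name" field matches
    print(_db[collection].find_one({"name": name}))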
Example #28
0
File: runTE.py Project: mili7522/N-back
def run(i,
        data_path,
        extension,
        save_folder,
        raw_save_root="/scratch/InfoDynFuncStruct/Mike/N-back/",
        save_every=20,
        GRP=False,
        compute_p=True,
        compress=False,
        set_k_to_0=False,
        calc_type='ksg',
        use_source_embedding=False,
        **preprocessing_params):
    """
    Run the TE calculation for a particular subject. Parameters are loaded from file, based on the AIS
    calculation, or set to 0 if set_k_to_0 is True

    Arguments:
        i -- An Int which states which file or subject to load and process
        data_path -- Location of the data files
        extension -- File extension of the data (e.g. .csv, .tsv, .mat)
        save_folder -- Subfolder of the 'Results' directory in which to save the local TE values, parameters and p_values
        raw_save_root -- Location to save the raw local TE values (as a npz or npy file)
        save_every -- None, or Int giving the number of regions to calculate before saving the current state of the results
        GRP -- Set to True if processing the GRP data, which is one array of dimension (region, timepoints, subject)
        compute_p -- If True, computes the p value of the returned TE
        compress -- If True, the raw TE values are saved as a compressed npz file instead of an npy file
        set_k_to_0 -- If True, skip loading of k and l parameters, instead initialising the DataFrame to zeros
        calc_type -- The type of estimator to use for the JIDT calculator - 'gaussian' or 'ksg'
        use_source_embedding -- If True, load up the values for history length and delay of the source from the AIS
                                calculations. Otherwise the source_history_length and source_delay parameters are set to 1
        preprocessing_params -- Parameters passed to utils.preprocess for preprocessing the time series data.
                                Includes sampling_rate / sampling_interval, mean_processing_type, trim_start, trim_end,
                                fcutlow, fcuthigh, use_filtfilt
    """
    start_time = time.time()
    files = utils.getAllFiles(data_path, extension)
    if GRP:
        file = files[0]
        filename = '{:02}'.format(i)  # Save the results by the subject's number
        subject_id = i
    else:
        file = files[i]
        filename = utils.basename(file)
        subject_id = None

    print("Processing file {}: {}".format(i, filename))
    # Check for the presence of the current_idx file
    # If it's not present, then either no calculations have been done, or the final results have already been saved
    if os.path.isfile(
            os.path.join(
                raw_save_root, "Results/{}/TE/raw/{}_current_idx.txt".format(
                    save_folder, filename))):
        # Load previous results, which are always saved in the uncompressed format
        results = np.load(
            os.path.join(
                raw_save_root,
                "Results/{}/TE/raw/{}.npy".format(save_folder, filename)))
        p_values = np.load(
            os.path.join(
                raw_save_root, "Results/{}/TE/raw/p_values/{}_p.npy".format(
                    save_folder, filename)))
        dce = np.load(
            os.path.join(
                raw_save_root, "Results/{}/TE/raw/dce/{}_dce.npy".format(
                    save_folder, filename)))
        with open(
                os.path.join(
                    raw_save_root,
                    "Results/{}/TE/raw/{}_current_idx.txt".format(
                        save_folder, filename)), 'r') as f:
            idx_values = f.readline()
        idx_values = list(map(int, idx_values.split(',')))
        print("Loading previous results")
        print("Starting from index", idx_values)
    else:
        results, p_values, dce, idx_values = None, None, None, None
        # Check both compressed and uncompressed options. If this file exists but the current_idx file doesn't then the
        # final results have already been saved. Exit to avoid running again
        if glob.glob(
                os.path.join(
                    raw_save_root,
                    "Results/{}/TE/raw/p_values/{}_p.np*".format(
                        save_folder, filename))):
            print("Result already present")
            exit()

    # Load parameter file
    param_file = 'Results/{}/AIS/params/{}_params.csv'.format(
        save_folder, filename)
    if set_k_to_0:
        df = utils.loadData(file, get_params=False, subject_id=subject_id)
        param_df = pd.DataFrame(np.zeros((len(df), 2), dtype=int),
                                columns=['k', 'tau'])
    else:
        df, param_df = utils.loadData(file,
                                      get_params=True,
                                      param_file=param_file,
                                      subject_id=subject_id)

    data = utils.preprocess(df, **preprocessing_params)
    saver = TEResultSaver(filename, save_folder, raw_save_root)
    calc = startCalc(calc_type)

    # Do the calculations
    results, p_values, dce = getLocalsForAllRegionPairs(
        data,
        param_df,
        calc,
        compute_p,
        saver=saver,
        save_every=save_every,
        results=results,
        p_values=p_values,
        dce=dce,
        idx_values=idx_values,
        use_source_embedding=use_source_embedding)

    # Save the final results
    # Add back the trimmed sections at the start and end of the timeseries by padding with zeros
    padding = ((0, 0), (preprocessing_params.get('trim_start', 0),
                        preprocessing_params.get('trim_end', 0)))
    saver.save_final_result(results,
                            p_values,
                            dce,
                            padding=padding,
                            compress=compress)
    print("\nTime taken:", round((time.time() - start_time) / 60, 2), 'min')
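A hypothetical driver for this function — for instance one task per subject in a batch array job — could look as follows; the index argument, paths and folder name are placeholders rather than values from the project:

if __name__ == '__main__':
    import sys
    run(int(sys.argv[1]),             # subject/file index from the job array
        data_path='Data/timeseries',  # placeholder path
        extension='.csv',
        save_folder='my_run',         # placeholder results subfolder
        compute_p=False,
        trim_start=5,                 # forwarded to utils.preprocess
        trim_end=5)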
Example #30
0
def main():
    a = get_args()

    # Load CLIP models
    model_clip, _ = clip.load(a.model)
    if a.verbose is True: print(' using model', a.model)
    xmem = {'RN50':0.5, 'RN50x4':0.16, 'RN101':0.33}
    if 'RN' in a.model:
        a.samples = int(a.samples * xmem[a.model])
    workdir = os.path.join(a.out_dir, basename(a.in_txt))
    workdir += '-%s' % a.model if 'RN' in a.model.upper() else ''
    os.makedirs(workdir, exist_ok=True)

    if a.diverse != 0:
        a.samples = int(a.samples * 0.5)
            
    if a.transform is True:
        trform_f = transforms.transforms_custom  
        a.samples = int(a.samples * 0.95)
    else:
        trform_f = transforms.normalize()

    if a.in_txt0 is not None:
        if a.verbose is True: print(' subtract text:', basename(a.in_txt0))
        if a.translate:
            translator = Translator()
            a.in_txt0 = translator.translate(a.in_txt0, dest='en').text
            if a.verbose is True: print(' translated to:', a.in_txt0) 
        if a.multilang is True:
            model_lang = SentenceTransformer('clip-ViT-B-32-multilingual-v1').cuda()
            txt_enc0 = model_lang.encode([a.in_txt0], convert_to_tensor=True, show_progress_bar=False).detach().clone()
            del model_lang
        else:
            txt_enc0 = model_clip.encode_text(clip.tokenize(a.in_txt0).cuda()).detach().clone()

    # make init
    global params_start, params_ema
    # spectral (rfft2) parameterisation: [batch, rgb, height, width//2 + 1, real/imag]
    params_shape = [1, 3, a.size[0], a.size[1]//2+1, 2]
    params_start = torch.randn(*params_shape).cuda() # random init
    params_ema = 0.
    if a.resume is not None and os.path.isfile(a.resume):
        if a.verbose is True: print(' resuming from', a.resume)
        params_start = load_params(a.resume).cuda()
        if a.keep > 0:
            params_ema = params_start[0].detach().clone()
    else:
        a.resume = 'init.pt'

    torch.save(params_start, 'init.pt') # final init
    shutil.copy(a.resume, os.path.join(workdir, '000-%s.pt' % basename(a.resume)))
    
    prev_enc = 0
    def process(txt, num):

        sd = 0.01
        if a.keep > 0: sd = a.keep + (1-a.keep) * sd
        params, image_f = fft_image([1, 3, *a.size], resume='init.pt', sd=sd, decay_power=a.decay)
        image_f = to_valid_rgb(image_f, colors = a.colors)

        if a.prog is True:
            lr1 = a.lrate * 2
            lr0 = a.lrate * 0.1
        else:
            lr0 = a.lrate
        optimizer = torch.optim.Adam(params, lr0)
    
        if a.verbose is True: print(' ref text: ', txt)
        if a.translate:
            translator = Translator()
            txt = translator.translate(txt, dest='en').text
            if a.verbose is True: print(' translated to:', txt)
        if a.multilang is True:
            model_lang = SentenceTransformer('clip-ViT-B-32-multilingual-v1').cuda()
            txt_enc = model_lang.encode([txt], convert_to_tensor=True, show_progress_bar=False).detach().clone()
            del model_lang
        else:
            txt_enc = model_clip.encode_text(clip.tokenize(txt).cuda()).detach().clone()
        if a.notext > 0:
            txt_plot = torch.from_numpy(plot_text(txt, a.modsize)/255.).unsqueeze(0).permute(0,3,1,2).cuda()
            txt_plot_enc = model_clip.encode_image(txt_plot).detach().clone()
        else: txt_plot_enc = None

        out_name = '%03d-%s' % (num+1, txt_clean(txt))
        out_name += '-%s' % a.model if 'RN' in a.model.upper() else ''
        tempdir = os.path.join(workdir, out_name)
        os.makedirs(tempdir, exist_ok=True)
        
        pbar = ProgressBar(a.steps // a.fstep)
        for i in range(a.steps):
            loss = 0

            noise = a.noise * torch.randn(1, 1, *params[0].shape[2:4], 1).cuda() if a.noise > 0 else None
            img_out = image_f(noise)
            
            if a.sharp != 0:
                lx = torch.mean(torch.abs(img_out[0,:,:,1:] - img_out[0,:,:,:-1]))
                ly = torch.mean(torch.abs(img_out[0,:,1:,:] - img_out[0,:,:-1,:]))
                loss -= a.sharp * (ly+lx)

            imgs_sliced = slice_imgs([img_out], a.samples, a.modsize, trform_f, a.align, micro=1.)
            out_enc = model_clip.encode_image(imgs_sliced[-1])
            loss -= torch.cosine_similarity(txt_enc, out_enc, dim=-1).mean()  # pull image crops towards the target text
            if a.notext > 0:
                loss += a.notext * torch.cosine_similarity(txt_plot_enc, out_enc, dim=-1).mean()
            if a.diverse != 0:
                imgs_sliced = slice_imgs([image_f(noise)], a.samples, a.modsize, trform_f, a.align, micro=1.)
                out_enc2 = model_clip.encode_image(imgs_sliced[-1])
                loss += a.diverse * torch.cosine_similarity(out_enc, out_enc2, dim=-1).mean()
                del out_enc2; torch.cuda.empty_cache()
            if a.expand > 0:
                nonlocal prev_enc  # prev_enc is defined in main(), so nonlocal (not global) binds it correctly
                if i > 0:
                    loss += a.expand * torch.cosine_similarity(out_enc, prev_enc, dim=-1).mean()
                prev_enc = out_enc.detach().clone()
            if a.in_txt0 is not None: # subtract text
                loss += torch.cosine_similarity(txt_enc0, out_enc, dim=-1).mean()
            del img_out, imgs_sliced, out_enc; torch.cuda.empty_cache()

            if a.prog is True:
                lr_cur = lr0 + (i / a.steps) * (lr1 - lr0)
                for g in optimizer.param_groups: 
                    g['lr'] = lr_cur
        
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if i % a.fstep == 0:
                with torch.no_grad():
                    img = image_f(contrast=a.contrast).cpu().numpy()[0]
                if a.sharp != 0:
                    img = img ** 1.3  # empirical tone mapping
                checkout(img, os.path.join(tempdir, '%04d.jpg' % (i // a.fstep)), verbose=a.verbose)
                pbar.upd()
                del img

        if a.keep > 0:
            global params_start, params_ema
            params_ema = ema(params_ema, params[0].detach().clone(), num+1)
            torch.save((1-a.keep) * params_start + a.keep * params_ema, 'init.pt')
        
        torch.save(params[0], '%s.pt' % os.path.join(workdir, out_name))
        shutil.copy(img_list(tempdir)[-1], os.path.join(workdir, '%s-%d.jpg' % (out_name, a.steps)))
        os.system('ffmpeg -v warning -y -i "%s" "%s.mp4"' % (os.path.join(tempdir, '%04d.jpg'), os.path.join(workdir, out_name)))  # os.path.join instead of a hard-coded backslash, so this also works off Windows

    with open(a.in_txt, 'r', encoding="utf-8") as f:
        texts = f.readlines()
        texts = [tt.strip() for tt in texts if len(tt.strip()) > 0 and tt[0] != '#']
    if a.verbose is True: 
        print(' total lines:', len(texts))
        print(' samples:', a.samples)

    for i, txt in enumerate(texts):
        process(txt, i)

    vsteps = int(a.length * 25 / len(texts)) # 25 fps
    tempdir = os.path.join(workdir, '_final')
    os.makedirs(tempdir, exist_ok=True)
    
    def read_pt(file):
        return torch.load(file).cuda()

    if a.verbose is True: print(' rendering complete piece')
    ptfiles = file_list(workdir, 'pt')
    pbar = ProgressBar(vsteps * len(ptfiles))
    for px in range(len(ptfiles)):
        params1 = read_pt(ptfiles[px])
        params2 = read_pt(ptfiles[(px+1) % len(ptfiles)])

        params, image_f = fft_image([1, 3, *a.size], resume=params1, sd=1., decay_power=a.decay)
        image_f = to_valid_rgb(image_f, colors = a.colors)

        for i in range(vsteps):
            with torch.no_grad():
                img = image_f((params2 - params1) * math.sin(1.5708 * i/vsteps)**2)[0].permute(1,2,0)
                img = torch.clip(img*255, 0, 255).cpu().numpy().astype(np.uint8)
            imsave(os.path.join(tempdir, '%05d.jpg' % (px * vsteps + i)), img)
            if a.verbose is True: cvshow(img)
            pbar.upd()

    os.system('ffmpeg -v warning -y -i "%s" "%s.mp4"' % (os.path.join(tempdir, '%05d.jpg'), os.path.join(a.out_dir, basename(a.in_txt))))  # os.path.join instead of a hard-coded backslash, so this also works off Windows
    if a.keep > 0: os.remove('init.pt')
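The a.keep logic above relies on an ema() helper that is not part of this listing. One plausible reading, consistent with the call ema(params_ema, new_params, num+1) and params_ema starting at 0, is a running average over the texts processed so far; the sketch below is an assumption, not the project's actual code:

def ema(prev, new, step):
    # running average: with step == 1 this simply returns `new`,
    # later calls blend the previous average back in
    if step <= 1:
        return new
    return prev + (new - prev) / step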
Example #31
0
def run(i,
        data_path,
        extension,
        save_folder,
        GRP=False,
        compute_p=True,
        calc_type='ksg',
        use_population_parameters=False,
        history_lengths=range(1, 6),
        delays=range(1, 2),
        **preprocessing_params):
    """
    Run AIS calculation for a particular subject, using parameters which are either selected by 
    maximising the bias corrected average AIS for the individual subject, or parameters which are
    first determined by averaging the AIS across the population
    
    Arguments:
        i -- An Int which states which file or subject to load and process
        data_path -- Location of the data files
        extension -- File extension of the data (eg. .csv, .tsv, .mat)
        save_folder -- Subfolder of the 'Results' directory in which to save the local AIS values, parameters and p_values
        GRP -- Set to True if processing the GRP data, which is one array of dimension (region, timepoints, subject)
        compute_p -- If True, computes the p value of the returned AIS
        calc_type -- The type of estimator to use for the JIDT calculator - 'gaussian' or 'ksg'
        history_lengths -- Range of possible history length values
        delays -- Range of possible delay values
        preprocessing_params -- Parameters passed to utils.preprocess for preprocessing the time series data.
                                Includes sampling_rate / sampling_interval, mean_processing_type, trim_start, trim_end,
                                fcutlow, fcuthigh, use_filtfilt
    """
    start_time = time.time()
    files = utils.getAllFiles(data_path, extension)
    if GRP:
        file = files[0]
        filename = '{:02}'.format(i)  # Save the results by the subject's number
        subject_id = i
    else:
        file = files[i]
        filename = utils.basename(file)
        subject_id = None
    # Makes folders to save the results
    os.makedirs("Results/{}/AIS/params".format(save_folder), exist_ok=True)
    os.makedirs("Results/{}/AIS/p_values".format(save_folder), exist_ok=True)

    print("Processing file {}: {}".format(i, filename))
    if os.path.exists('Results/{}/AIS/p_values/{}_p.csv'.format(
            save_folder, filename)):
        print("Result already present")
        exit()  # If the results have already been saved, avoid running again

    calc = startCalc(calc_type)

    # Load and preprocess data
    df = utils.loadData(file, subject_id=subject_id)
    data = utils.preprocess(df, **preprocessing_params)

    if use_population_parameters:
        # If using population parameters, either load the pre-calculated parameters, or calculate them and save to file
        if os.path.exists(
                'Results/{}/AIS/population_params.csv'.format(save_folder)):
            parameters = pd.read_csv(
                'Results/{}/AIS/population_params.csv'.format(save_folder))
        else:
            parameters = getPopulationParameters(data_path, extension, calc,
                                                 history_lengths, delays,
                                                 **preprocessing_params)
            pd.DataFrame(parameters, columns=['k', 'tau']).to_csv(
                'Results/{}/AIS/population_params.csv'.format(save_folder),
                index=None)
    else:
        parameters = None

    results, all_parameters, p_values = getLocalsForAllRegions(
        data,
        calc,
        history_lengths,
        delays,
        parameters,
        print_max_idx=False,
        compute_p=compute_p)

    # Add back the trimmed sections at the start and end of the timeseries by padding with zeros
    padding = ((0, 0), (preprocessing_params.get('trim_start', 0),
                        preprocessing_params.get('trim_end', 0)))
    results = np.pad(results, padding, mode='constant', constant_values=0)

    # Save results
    pd.DataFrame(results).to_csv('Results/{}/AIS/{}_AIS.csv'.format(
        save_folder, filename),
                                 index=None,
                                 header=None)
    params_df = pd.DataFrame(all_parameters, columns=['k', 'tau', 'DCE'])
    params_df.to_csv('Results/{}/AIS/params/{}_params.csv'.format(
        save_folder, filename),
                     index=None)
    pd.DataFrame(p_values).to_csv('Results/{}/AIS/p_values/{}_p.csv'.format(
        save_folder, filename),
                                  index=None,
                                  header=None)

    print("\nTime taken:", round((time.time() - start_time) / 60, 2), 'min')