Example #1
def generate(source: Path, destination: Path):
  destination.mkdir(exist_ok=True)
  with open(source / 'defaults.json', 'r') as f:
    data = json.loads(f.read())
    with alive_bar(len(data)) as bar:
      for item in data:
        with open(destination / f'patch-0-{time_ns()}.json', 'w') as out:
          out.write(str(
            Patchfile(f'/groups/{item["identifier"]}').write(item)
          ))
          bar()
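For context, this excerpt presumes imports along these lines (a sketch; Patchfile is a class from the surrounding project, assumed here):

import json
from pathlib import Path
from time import time_ns
from alive_progress import alive_bar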
Example #2
 def _load_bytes(self, path: Path, parser: Callable[[bytes, bool], bool], raw: bool = False) -> None:
     with open(path, 'rb') as file:
         file_lines = file.readlines()
         file_length = len(file_lines)
         
         with alive_bar(file_length, title=path.name, monitor=False, bar='classic', spinner='twirl') as bar:
             for line in file_lines:
                 if parser(line, raw):
                     print('Found everything early')
                     break
                 bar()
Example #3
def CuadraturasRemesas(patrimonio, fechaCorteInicio, fechaCorteFin):
    try:
        diferenciasTRX = dict()
        restaTrxRemesas = dict()
        montosRemesaTrx = dict()

        with alive_bar(5) as bar:

            diferenciaRemesas, cuentasConDiferencias = DiferenciasRemesas(
                patrimonio, fechaCorteInicio, fechaCorteFin)
            bar()
            print('Remittance differences OK')

            movimientosDuplicados = MovimientosDuplicados(
                patrimonio, fechaCorteInicio, fechaCorteFin)
            bar()
            print('Duplicate movements OK')

            for fechaConDiferencias in diferenciaRemesas.keys():
                diferenciasTRX[
                    fechaConDiferencias] = DiferenciasAsientosContables(
                        patrimonio, fechaConDiferencias)

                if diferenciasTRX[fechaConDiferencias].get(
                        'DIFERENCIA_TRX') and diferenciaRemesas[
                            fechaConDiferencias].get('DIFERENCIA_POR_CUENTAS'):
                    montoTRX = diferenciasTRX[fechaConDiferencias][
                        'DIFERENCIA_TRX']
                    montoRemesas = diferenciaRemesas[fechaConDiferencias][
                        'DIFERENCIA_POR_CUENTAS']
                    restaTrxRemesas[
                        fechaConDiferencias] = montoTRX + montoRemesas
                    montosRemesaTrx[fechaConDiferencias] = {
                        'DIFERENCIA_TRX': montoTRX,
                        'DIFERENCIA_REMESA': montoRemesas
                    }
            bar()
            print('TRX differences OK')

            crearXlsDescuadraturas(patrimonio, fechaCorteInicio, fechaCorteFin,
                                   cuentasConDiferencias, montosRemesaTrx,
                                   movimientosDuplicados)
            bar()
            print('XLSX created')

            ScriptSqlUpdateTrx.crearSql(patrimonio,
                                        list(diferenciaRemesas.keys()))
            bar()
            print('SQL script created')

        return True
    except Exception as e:
        raise Exception('Error in CuadraturasRemesas: %s' % e)
Example #4
def save_pass(section: str = ''):

    file_name_2a = configure.get(section, 'file_name1')
    file_name_2b = configure.get(section, 'file_name2')
    located_path = configure.get(section, 'located_path')
    file_name = [
        file_browser(file_name_2a, located_path),
        file_browser(file_name_2b, located_path)
    ]
    schema = 'groundstation'
    table_name = 'planning_pass'
    col_value_dict = {
        'sat_id': '',
        'sat_name': '',
        'AOS': '',
        'LOS': '',
        'Duration': '',
        'station_id': '',
    }

    conn = connexion()
    curr = conn.cur
    delete_from_table(schema, table_name)
    columns = list(col_value_dict.keys())
    #values = list(col_value_dict.values())  # liste append

    for f in file_name:
        #print(f)
        values = extract_pass_2a(f)
        try:
            with alive_bar(len(values)) as bar:
                c = 0
                for p in values:
                    #print(p)
                    # print(list(p ))
                    sql = (f'INSERT INTO {schema}."{table_name}" ("' +
                           '", "'.join(['%s'] * len(columns)) + '"' +
                           ") ") % tuple(columns) + "VALUES (" + ", ".join(
                               ["%s"] * len(columns)) + ")"
                    curr.execute(sql, list(p))
                    conn.commit()
                    c += 1
                    bar()
                    time.sleep(0.01)
                print(colored(f"{c}:", "green"), ("pass are saved "))

        except Exception as e:
            print(colored(f'no passes were saved because {e}', "red"))

    print(
        "\n\n                    *********  New planing pass list ********* \n  "
    )
    pass_existing()
Example #5
def reverse_range(range_ip):
    def compute():
        for i in range(255):
            time.sleep(.01)  # process items
            yield
            ip = str(range_ip) + "." + str(i)
            reverse_dnslookup(ip)
        read_output_file()

    with alive_bar(255) as bar:
        for i in compute():
            bar()
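The generator here yields before doing the DNS lookup, so the bar ticks one item ahead of the actual work. A variant that reports progress after each lookup (a sketch; reverse_dnslookup and read_output_file come from the surrounding project):

def reverse_range(range_ip):
    def compute():
        for i in range(255):
            reverse_dnslookup(f"{range_ip}.{i}")  # process the item first
            yield                                 # then signal one unit of progress
        read_output_file()

    with alive_bar(255) as bar:
        for _ in compute():
            bar()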
Example #6
def extract_frames(input_file, output_folder, verbose=False):
    """Extract video frames to a folder
    for -vsync: "cfr" will use "r_frame_rate", "vfr" will use "avg_frame_rate"
    `ffmpeg -i "$i" original_frames/%06d.png`
    """
    # TODO add downscaling option
    stream_metadata = ffprobe.analyze_video_stream_metadata(input_file)
    frame_count = int(stream_metadata["packetCount"])
    vfrBool = (stream_metadata["fpsReal"] != stream_metadata["fpsAverage"]
               )  # Video is cfr when average fps = real fps

    pathlib.Path(output_folder).mkdir(parents=True,
                                      exist_ok=True)  # Create outputFolder
    cmd = [
        definitions.FFMPEG_BIN,
        "-i",
        input_file,
        "-vsync",
        "cfr",
        "-pix_fmt",
        "rgb24",  # Usually defaults to rgba which causes alpha problems
        os.path.join(output_folder, "%06d.png")
    ]
    if verbose is True:
        print(" ".join(cmd))
    # subprocess.run(cmd)
    if vfrBool is True:
        with subprocess.Popen(cmd,
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE,
                              bufsize=1,
                              universal_newlines=True) as process:
            for line in process.stderr:
                if line.startswith("frame="):
                    print(line, end="")
    else:
        with alive_bar(frame_count, enrich_print=False) as bar:
            frame_count_processed_last = 0
            with subprocess.Popen(cmd,
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE,
                                  bufsize=1,
                                  universal_newlines=True) as process:
                for line in process.stderr:
                    if line.startswith("frame="):
                        # Parses inbetween "frame=" and "fps="
                        frame_count_processed = int(
                            re.findall(r"frame=(.+?)fps=", line)[0])
                        bar(incr=(frame_count_processed -
                                  frame_count_processed_last))
                        frame_count_processed_last = frame_count_processed
                    elif verbose is True:
                        print(line, end="")
Example #7
async def fetch_comments(article):
    """
     Open session for fetching multiple comments in
     Asynchronous way.
    :param article: article to fetch comments from him
    :return: dict of comments.
    """
    async with ClientSession() as session:
        # init progress bar
        with alive_bar(article['descendants'], "Collecting comments") as bar:
            data = (await fetch_comment(session, article['id'], bar))
            return data
Example #8
 def run(self) -> None:
     """Run preprocessing pipeline over the input file list."""
     with alive_bar(len(self.transcript_paths)) as bar:
         for path in self.transcript_paths:
             transcript = self.load_transcript(path)
             with atomic_write(path.with_suffix(".text"), mode="w", encoding="utf-8") as textf:
                 bytecount = textf.write(path.stem)
                 transcript, unique_words = self.preprocess_transcript(transcript, textf, path)
                 if textf.tell() == bytecount:
                     self.log.warning(f"Preprocessing output was empty for {path.stem}.")
             self.write_transcript_and_words(path, transcript, unique_words)
             bar()
Example #9
def search(location: Path, patterns: list[str]) -> list[Path]:
    """Return the paths which match the given set of patterns from any location."""
    def match_pattern(pattern: str) -> list[Path]:
        matches = []
        for match in location.rglob(pattern):
            matches.append(match)
            progress_bar()
        return matches

    with alive_bar() as progress_bar:
        match_lists = [match_pattern(pattern) for pattern in patterns]
    return list(set(chain.from_iterable(match_lists)))
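Note that alive_bar() is entered without a total here: the bar runs in "unknown" mode, showing a running count instead of a percentage or ETA, which suits rglob's unpredictable number of matches. A minimal standalone sketch:

from pathlib import Path
from alive_progress import alive_bar

with alive_bar() as bar:  # no total -> unknown mode: spinner plus a raw count
    for path in Path('.').rglob('*.py'):
        bar()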
Example #10
File: IvyCM.py  Project: not4il/Ivy
def makecombo(usernames, passwords, between):
    a = len(usernames)
    b = len(passwords)
    with alive_bar(a * b) as bar:
        for i in range(a):
            for j in range(b):
                combolist.append(f'{usernames[i]}{between}{passwords[j]}\n')
                bar()
    shuffled = random.sample(combolist, len(combolist))  # shuffle once, after building
    with open('combolist.txt', 'a') as file:
        for line in shuffled:
            file.write(line)
Example #11
def interpolate_folder_mode(input_folder,
                            output_folder,
                            multiplier=DEFAULT_MULTIPLIER):
    # TODO write images during interpolation to avoid disk write bottleneck at end
    # List images in folder
    input_files_path = []
    for filePath in pathlib.Path(input_folder).glob(
            '**/*'):  # List all files in the directory as their absolute path
        file_path_absolute = os.path.normpath(filePath.absolute())
        if file_path_absolute.endswith(
            ('.png', '.jpg', '.jpeg', '.webp', '.bmp')):
            # Only adds files that have image extensions, fixes problems caused by "Thumbs.db"
            input_files_path.append(file_path_absolute)
    input_files_path.sort()

    # Read frame dimensions from first frame
    height, width = _read_image_dimensions(input_files_path[0])

    # Read files
    input_files = []
    for i in range(len(input_files_path)):
        # print(i)
        input_files.append(_read_image(input_files_path[i]))

    # Interpolate
    multiplier_internal = 1
    while multiplier_internal < multiplier:
        multiplier_internal = multiplier_internal * 2
        print("Interpolating to {}x:".format(multiplier_internal))
        with alive_bar(len(input_files) * 2, enrich_print=False) as bar:
            output_files = []
            for j in range(len(input_files) - 1):
                # print(input_files_path[j], input_files_path[j + 1])
                mid = rife_model.inference(input_files[j], input_files[j + 1])
                output_files.append(input_files[j])
                bar()
                output_files.append(mid)
                bar()
            for _ in range(2):  # Duplicate last frame twice
                output_files.append(input_files[-1])
                bar()
        if multiplier_internal < multiplier:
            input_files = output_files

    if os.path.isdir(output_folder
                     ):  # Delete output_folder if it exists to avoid conflicts
        shutil.rmtree(output_folder)
    pathlib.Path(output_folder).mkdir(parents=True,
                                      exist_ok=True)  # Create output_folder
    for i in range(len(output_files)):
        cv2.imwrite(os.path.join(output_folder, "{:06d}.png".format(i + 1)),
                    (output_files[i][0] * 255).byte().cpu().numpy().transpose(
                        1, 2, 0)[:height, :width])
Example #12
def send_progress(device, data):
    """
    coolness factor
    :param data:
    :return:
    """
    data = serialize(data)
    sz = len(data)
    with alive_bar(sz, bar="filling") as bar:
        for i in range(sz):
            device.write(data[i])
            bar()
Example #13
def extract():  # randomly pull samples from the training set to form the validation/test set
    print('Start extracting...')
    paths = glob.glob(train_path + '/*.jpg')
    with alive_bar(extract_scale) as bar:
        for _ in range(extract_scale):
            bar()
            path = random.choice(paths)
            name = path.split('\\')[-1].split('.')[0]
            newpath = test_path + '/' + name + '.jpg'
            os.rename(path, newpath)
            paths.remove(path)
    print('Completed.')
Example #14
def GetCards(setid, showBar):
    set_url = "https://www.neonmob.com/api/setts/" + str(setid) + "/"
    data = requests.request('GET', set_url).json()
    set_name = data['name']
    total = 0
    for cat in range(len(data['core_stats'])):
        total += data['core_stats'][cat]['total']
    for cat in range(len(data['special_stats'])):
        total += data['special_stats'][cat]['total']

    print("\nGetting cards from series \"" + set_name + "\"...")
    cards = []
    nxt = "/api/sets/" + str(setid) + "/pieces/"
    with conditional(showBar,
                     alive_bar(total, bar='smooth',
                               spinner='dots_recur')) as bar:
        first = True
        while True:
            raw = requests.request('GET', "https://www.neonmob.com" + nxt)
            if raw.status_code == 500 and first:
                print("Using fallback card endpoint...")
                raw = requests.request(
                    'GET', "https://www.neonmob.com/api/sets/" + str(setid) +
                    "/piece-names")
                data = raw.json()
                for card in data:
                    cards.append({
                        'name': card['name'],
                        'id': card['id'],
                        'setName': set_name
                    })
                    if showBar:
                        bar()
                if not showBar:
                    print('...', end="", flush=True)
                break
            else:
                data = raw.json()
                nxt = data['payload']['metadata']['resultset']['link']['next']
                for card in data['payload']['results']:
                    cards.append({
                        'name': card['name'],
                        'id': card['id'],
                        'setName': set_name
                    })
                    if showBar:
                        bar()
                if not showBar:
                    print(". ", end="", flush=True)
                first = False
                if not nxt:
                    break
    return cards
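The conditional(...) wrapper above is presumably the `conditional` PyPI package, whose context manager enters the wrapped one only when the flag is truthy and yields None otherwise (an assumption; the import is outside this excerpt). The pattern in isolation:

from conditional import conditional
from alive_progress import alive_bar

items = list(range(100))
show_bar = True
with conditional(show_bar, alive_bar(len(items))) as bar:
    for _ in items:
        if show_bar:
            bar()  # only callable when the wrapped context manager was entered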
Example #15
 def rank_page(self):
     with alive_bar(10) as bar:
         pagenum = 0
         while True:
             if self.counter.bar_breaker:
                 break
             if self.counter.pagenum > pagenum:
                 for i in range(self.counter.pagenum - pagenum):
                     bar(text=f"正在处理{self.counter.pagenum}页数据")
                     pagenum = self.counter.pagenum
             if pagenum == 10:
                 break
Example #16
def generate_success_dataframe(target_directory):
    """
        Creates a pandas data fame from JSON files present at the given failure location.
        Assumes that all these JSON files have valid bar data.
        :param target_directory: location to read JSON files from
    """
    stdout.write(f'=> Generating dataframe for success tickers...\n')

    def _get_ticker_id(file_name):
        return int(file_name.split(sep)[-1].split('.')[0])
    # create a place holder dataframe
    expected_columns = ['time_stamp', 'ecode', 'session', 'high', 'low', 'close',
                        'volume', 'average', 'count']
    data = pd.DataFrame(columns=expected_columns)

    # create temporary directory to store smaller CSV files
    temp_directory = '.temp'
    make_dirs(temp_directory)

    # extract all json files from target directory
    success_file_pattern = join(target_directory, '*.json')
    success_files = glob(success_file_pattern)
    total = len(success_files)

    if bool(total):
        json_generator = (read_json_file(file) for file in success_files)
        counter = 0  # to count temp files
        with alive_bar(total=total, **_BAR_CONFIG) as bar:
            for i in range(total):
                ticker_data = next(json_generator)
                bar_data, meta_data = ticker_data['bar_data'], ticker_data['meta_data']
                temp_data = pd.DataFrame(bar_data)
                temp_data['ecode'] = meta_data.get('ecode', _get_ticker_id(success_files[i]))
                data = data.append(temp_data)
                _time_to_cache = (i+1 == total) or ((i > 0) and (i % 100 == 0))
                if _time_to_cache:
                    if data.shape[0] > 0:
                        temp_file = join(temp_directory, f'success_{counter}.csv')
                        data.to_csv(temp_file)
                        data = pd.DataFrame(columns=expected_columns)
                        counter += 1
                bar()

        # merge all CSV files into a single dataframe
        # delete all temp files
        temp_files = glob(join(temp_directory, 'success_*.csv'))
        data = pd.concat(map(read_csv, temp_files))
        data.sort_values(by=['ecode', 'time_stamp'], inplace=True, ignore_index=True)
        data = data[expected_columns]
    delete_directory(temp_directory)

    return data
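The buffering scheme above, flushing to a temporary CSV every 100 inputs and concatenating at the end, keeps any single in-memory frame small. Stripped to its core, the idea looks roughly like this (a sketch with hypothetical names, assuming the JSON files are directly readable by pandas):

import glob
import pandas as pd

def merge_json_in_chunks(files, tmp_dir, chunk=100):
    buf = []
    for i, path in enumerate(files, 1):
        buf.append(pd.read_json(path))
        if i % chunk == 0 or i == len(files):  # flush every `chunk` files
            pd.concat(buf).to_csv(f'{tmp_dir}/part_{i}.csv', index=False)
            buf = []
    parts = sorted(glob.glob(f'{tmp_dir}/part_*.csv'))
    return pd.concat(map(pd.read_csv, parts), ignore_index=True)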
Example #17
def fetch_article_by_rank():
    """
        Fetching article by given rank from user input
        and printing all article comments, rank should be limit.
        according to HN api's docs there is a limit of
        Up to 500 top articles returning from the api.
    """
    try:

        rank = input("Enter a rank number between 1 - 500: ")
        # input return string so converting to int

        if not validate_input_number(1, int(rank), 500):
            raise ArticleRankException("Rank out of range")

        # ranking indexing begin from 0
        indexed_rank = str(int(rank) - 1)
        # query top articles by filtering data using startAt and endAt filters
        # range of articles will be by the rank which the user provided
        query = "topstories.json?orderBy=%22$key%22&startAt=%22{0}" \
                "%22&endAt=%22{1}%22" \
            .format(indexed_rank, indexed_rank)
        url_to_fetch = ''.join([HACKER_NEWS_API_BASE_URL, query])

        # fetching top stories limit by rank value
        article = {}
        with alive_bar(None, "Fetching article") as bar:
            response = requests.get(url_to_fetch)
            bar()

            # decoding data to json format
            articles_id = response.json()
            if indexed_rank not in articles_id:
                raise ArticleNotFound("Not found article with rank %s" % rank)

            # generate item url based on founded article id .
            article = requests.get(HACKER_NEWS_API_ITEM_URL %
                                   (articles_id[indexed_rank])).json()
            bar()
        if 'kids' in article:
            logging.info("Found article: %s" % article['title'])
            logging.info("Fetching and printing comments, please wait...")
            loop = asyncio.get_event_loop()
            result = loop.run_until_complete(fetch_comments(article))
            print_comment(result)
        else:
            logging.info("No comments found for article with rank %s", rank)

    except ValueError:
        logging.error('Invalid input')
    except Exception as err:
        logging.error(err)
Example #18
    def lemmatize_docs(self,
                       docs,
                       allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV']):
        print('Start lemmatizing docs...')
        print('\tRemoving stop words...')
        # Remove stopwords (le, la, les, ...)
        docs = [[
            word for word in simple_preprocess(str(doc))
            if word not in self.stop_words
        ] for doc in docs]

        # Form bigrams and trigrams (a higher threshold yields fewer phrases).
        # After splitting the text into words, a bag-of-words approach no
        # longer considers word order. If the information carried by word
        # order matters, consider splitting into word pairs (bigrams), word
        # triplets (trigrams), and so on.
        print('\tForming bigrams and trigrams...')
        bigram = gensim.models.Phrases(docs, min_count=5, threshold=100)
        trigram = gensim.models.Phrases(bigram[docs], threshold=100)
        bigram_mod = gensim.models.phrases.Phraser(bigram)
        trigram_mod = gensim.models.phrases.Phraser(trigram)
        # print('Trigram example:', trigram_mod[bigram_mod[docs[0]]], '\n')
        docs = [bigram_mod[doc] for doc in docs]
        docs = [trigram_mod[bigram_mod[doc]] for doc in docs]

        # Lemmatization (petits, petites, petit -> petit)
        print('\tLemmatizing...')
        texts_out = []
        # pip install --upgrade spacy # to get v3.0
        # python -m spacy download fr_dep_news_trf
        # fr_dep_news_trf = French transformer pipeline (camembert-base).
        # Components: transformer, morphologizer, parser, attribute_ruler,
        # lemmatizer.
        nlp = spacy.load('fr_dep_news_trf', disable=['parser', 'ner'])
        with alive_bar(len(docs), force_tty=1, spinner='ball_bouncing') as bar:
            for sentence in docs:
                doc = nlp(" ".join(sentence))
                #            spacy.displacy.render(doc, style='ent', jupyter=True)
                texts_out.append([
                    token.lemma_ for token in doc
                    if token.pos_ in allowed_postags
                ])
                bar()

        # Remove stopwords once more after lemmatization
        print('\tRemoving stop words after lemmatization...')
        texts_out = [[
            word for word in simple_preprocess(str(doc))
            if word not in self.stop_words
        ] for doc in texts_out]
        print('Docs lemmatized.')
        return texts_out
Example #19
def market_data(
    filters: Optional[Dict[str, Any]] = None,
    progress_bar: bool = False,
    timeout: float = 1.0,
) -> ItemStats:
    """Collect data from warframe market API.

    Collects information from the warframe market API. Waits 1 second every
    10 items collected. This is to ensure we do not exceed limits.

    Args:
        filters: A dictionary of `ShortItem` attributes to values that will
            be used to filter the items for which statistics will be collected. If
            no dictionary is provided, then the default dict of None filters nothing.
        progress_bar: Whether to use a progress bar or not.
        timeout: The number of seconds to wait every ten items in order to reduce
            the load on warframe.market API.

    Returns:
        An object of `ItemStats` that contains all the collected data.
    """
    if filters is None:
        filters = {}
    json_data = from_url(ITEMS_URL)
    json_data = collect_data(json_data, ["payload", "items"])
    items = to_class(ShortItem, json_data)
    prime_items = []
    prime_stats = []
    if progress_bar:
        cm = alive_bar(len(items))
    else:
        cm = nullcontext()
    with cm as bar:
        for j, i in enumerate(items):
            matched = all([getattr(i, f) == val for f, val in filters.items()])
            if matched is True:
                json_data = from_url(STATS_URL % (i.url_name))
                stat_closed = collect_data(
                    json_data, ["payload", "statistics_closed", "90days"])
                stat_live = collect_data(
                    json_data, ["payload", "statistics_live", "48hours"])
                stat_closed = to_class(Stats, stat_closed)
                stat_live = to_class(LiveStats, stat_live)
                stat = to_stats(stat_closed, stat_live, i.item_name)
                prime_stats.append(stat)
                prime_items.append(i)
                if j % 10 == 0:
                    time.sleep(timeout)
            if type(cm) is not nullcontext:
                bar()

    return ItemStats(prime_items, prime_stats)
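Where Example #14 made the bar optional with the `conditional` package, this one uses the standard library's contextlib.nullcontext, which yields None, hence the type check before each bar() call. A leaner variant of the same idea (a sketch):

from contextlib import nullcontext
from alive_progress import alive_bar

def process(items, progress=False):
    with (alive_bar(len(items)) if progress else nullcontext()) as bar:
        for item in items:
            ...  # real work goes here
            if bar is not None:  # nullcontext yields None
                bar()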
Example #20
def scan_phase(args, api, logger, samples):
    with alive_bar(total=len(samples), title="[SCAN] Samples") as bar:
        for sample in samples:
            analysis = api.scan_file(sample)
            while analysis['response_code'] != 200:
                print("Error: Reached maximum requests/minute. "
                      "Sleeping 60 seconds.")
                time.sleep(60)
                analysis = api.scan_file(sample)
            logger.write(
                f"{os.path.basename(sample):<40s}\t"
                f"{analysis['results']['permalink'].split('f-')[0]}\n")
            bar()
Example #21
def TME3():
    with open("pairs.txt", 'r') as f:
        text = f.read()
    text = text.split('\n')
    pairs = []
    with alive_bar(len(text), "Reading file") as bar:
        for line in text:
            if not line.strip():  # skip blank lines (e.g. the trailing newline)
                bar()
                continue
            line = re.sub(r'\s+', ' ', line)
            line = line.split(' ')
            ele = int(line[2]), int(line[4])
            pairs.append(ele)
            bar()
    print(ChineseRemainder(pairs)[0])
Example #22
def Dirichlet(msh, physical_tag, g, T, B):
    """
    Applique la condition de Dirichlet à la matrice B et à la matrice T qui reprèsente A
    dans le système AU = B
    """
    Indexes_points = []
    with alive_bar(len(msh.segments)) as bar:
        for s in msh.segments:
            if s.tag == physical_tag:
                for p in s.p:
                    Indexes_points.append(p.id)
            bar()

    with alive_bar(len(Indexes_points)) as bar:
        for i in Indexes_points:
            for indx in range(len(T.data[0])):
                if T.data[1][0][indx] == i:
                    T.data[0][indx] = 0
            T.append(i, i, 1)
            B[i] = g
            bar()
Example #23
def progress(iter_, func):
    '''
    >>> import time
    >>> def func(x): time.sleep(0.1)
    >>> progress(range(50), func)
    '''
    # pylint: disable = disallowed-name
    with alive_progress.alive_bar(total=len(iter_),
                                  bar='circles',
                                  spinner='dots_reverse') as bar:
        for item in iter_:
            func(item)
            bar()
Example #24
def perform_analysis(media_file_path, colors_df, is_video, match_colors,
                     show_progress):
    msg = """
            This may take awhile, but it will run in the background.
            A pop-up will appear when the analysis is done.
            Click OK to begin the analysis.
            """
    showinfo('Analysis Started', msg)
    if is_video:
        cap = cv2.VideoCapture(media_file_path)
        num_frames = count_frames(media_file_path)
        frame_width = cap.get(3)  # cv2.CAP_PROP_FRAME_WIDTH
        frame_height = cap.get(4)  # cv2.CAP_PROP_FRAME_HEIGHT
        scale = 1
        if frame_width > 100 or frame_height > 100:
            scale = get_scale(100, frame_width, frame_height)
        new_width = int(scale * frame_width)
        new_height = int(scale * frame_height)
        num_pixels = num_frames * new_width * new_height

        if show_progress:
            with alive_bar(num_pixels) as bar:
                for y in analyze_video_with_progress(cap, colors_df,
                                                     match_colors):
                    bar()
        else:
            analyze_video(cap, colors_df, match_colors)

        cap.release()
    else:
        img = cv2.imread(media_file_path)
        img = resize_image(img, is_video)
        if show_progress:
            with alive_bar(img.shape[0] * img.shape[1]) as bar:
                for y in analyze_image_with_progress(img, colors_df,
                                                     match_colors):
                    bar()
        else:
            analyze_image(img, colors_df, match_colors)
Example #25
def interpolate_folder_mode(input_folder,
                            output_folder,
                            gpu_id=DEFAULT_GPU_ID,
                            threads=DEFAULT_THREADS,
                            verbose=False,
                            **kwargs):
    """Folder-mode Interpolation"""
    target_frames = len(os.listdir(input_folder)) * 2

    if os.path.isdir(output_folder
                     ):  # Delete output_folder if it exists to avoid conflicts
        shutil.rmtree(output_folder)
    pathlib.Path(output_folder).mkdir(parents=True,
                                      exist_ok=True)  # Create output_folder

    cmd = [
        definitions.RIFE_NCNN_VULKAN_BIN, "-i",
        os.path.abspath(input_folder), "-o",
        os.path.abspath(output_folder), "-g",
        str(gpu_id), "-j", str(threads), "-v"
    ]
    if verbose is True:
        print(" ".join(cmd))
    # subprocess.run(cmd, cwd=definitions.RIFE_NCNN_VULKAN_LOCATION)
    with alive_bar(target_frames, enrich_print=False) as bar:
        with subprocess.Popen(cmd,
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE,
                              cwd=definitions.RIFE_NCNN_VULKAN_LOCATION,
                              bufsize=1,
                              universal_newlines=True) as process:
            for line in process.stderr:
                if line.startswith("["):  # Starting GPU info
                    print(line, end="")
                elif line.endswith("done\n"):  # Verbose progress output
                    bar()
                elif line.startswith(("find_blob_index_by_name",
                                      "fopen")):  # Model not found error
                    raise OSError("Model not found: {}".format(
                        line.replace("\n", "")))
                elif line.startswith(
                        "vkAllocateMemory failed"):  # VRAM memory error
                    raise RuntimeError("VRAM memory error: {}".format(
                        line.replace("\n", "")))
                elif line.startswith(
                    ("vkWaitForFences failed",
                     "vkQueueSubmit failed")):  # General vulkan error
                    raise RuntimeError("Vulkan error: {}".format(
                        line.replace("\n", "")))
                else:
                    print(line, end="")
Example #26
def main():

    # shows dialog box and return the path of source and destination folder
    sourceFolderPath = askdirectory(title='Select Source Folder')
    destinationFolderPath = askdirectory(title='Select Destination Folder')

    # get all files from source folder
    allFiles = os.listdir(sourceFolderPath)

    # define progress bar
    with alive_bar(len(allFiles), title='Renaming...') as bar:
        for file in allFiles:
            name, extension = file.rsplit('.', 1)

            splittedName = name.split('_')
            splittedNameCount = len(splittedName)

            correctName = ""

            # checking if image/video has prefix (eg. IMG, VID, PIX, ...)
            if splittedNameCount == 3:
                splittedName.pop(0)
                correctName = '_'.join(splittedName) + '.' + extension
            elif splittedNameCount == 2:
                correctName = file
            else:
                # this is for invalid files
                correctName = ""

            fileSource = sourceFolderPath + '/' + file
            fileDestination = destinationFolderPath + '/'

            if correctName != "":
                fileDestination = fileDestination + correctName
            else:
                # all invalid files are copied in invalidFiles folder
                invalidFilesPath = fileDestination + 'invalidFiles'
                if not os.path.exists(invalidFilesPath):
                    os.mkdir(fileDestination + 'invalidFiles')

                fileDestination = invalidFilesPath + '/' + file

            # if file is already renamed skip copying (for eg. if app is started twice)
            if not os.path.exists(fileDestination):
                # copy2 is saving all metadata
                shutil.copy2(fileSource, fileDestination)
            else:
                bar.text("Copying file skipped: File already renamed")

            # update progress bar
            bar()
Example #27
def process():
    fin = open(FIN, 'r')
    fout = open(FOUT, 'w')

    bits = [int(bit) for bit in next(fin).split(' ') if bit != '\n']
    n_samples = int(next(fin))
    ns = [int(n) for n in next(fin).split(' ') if n != '\n']
    total = n_samples * len(bits) * len(ns)

    ints = []
    for line in fin:
        a, b = [int(num) for num in line.split(' ')]
        ints.append((a, b))

    running_times = []
    with alive_bar(total) as bar:
        for n in ns:
            print('----- In Z_{} -----'.format(n))
            zn = Zn(n)
            mib = []
            mie = []
            me = []

            for a, b in ints:
                res, res_time = process_sample(zn.consider(a), zn.consider(b))
                res += res_time
                for r in res:
                    fout.write('%s ' % r)
                fout.write('\n')
                mib.append(res_time[0])
                mie.append(res_time[1])
                me.append(res_time[2])
                bar()

            sums = [0, 0, 0]
            for i in range(len(mib)):
                sums[0] += mib[i]
                sums[1] += mie[i]
                sums[2] += me[i]

                if i % n_samples == n_samples - 1:
                    avgs = [s / n_samples for s in sums]
                    for avg in avgs:
                        fout.write('%s ' % avg)
                    fout.write('\n')
                    sums = [0, 0, 0]

            fout.write('------------------------\n')

    fin.close()
    fout.close()
Example #28
    def process_notebook_pages(self):
        self.logger.info(
            f"Processing note book {self.title} - {self.notebook_id}")

        if not config.yanom_globals.is_silent:
            print(f"Processing '{self.title}' Notebook")
            with alive_bar(len(self.note_pages), bar='blocks') as bar:
                for note_page in self.note_pages:
                    self._process_page(note_page, bar)

            return

        for note_page in self.note_pages:
            self._process_page(note_page)
Example #29
def match_groups_friends(groups_list, friends_limit, user_id):
    output_groups = []
    offset = 0
    friends_list = get_friends_list(user_id, offset)
    while len(friends_list) != 0:
        with alive_bar(len(groups_list)) as bar:
            print("\nMatching friends and groups...")
            for group in groups_list:
                bar()
                output_groups = is_member(output_groups, group, friends_list,
                                          friends_limit)
        offset += 200
        friends_list = get_friends_list(user_id, offset)
    return output_groups
Example #30
def portfolio_weights_ew(tickers, start_date, end_date,
                         portfolio_rebalance_period):
    business_days_end_months = pd.date_range(start_date,
                                             end_date,
                                             freq=portfolio_rebalance_period)
    portfolio_weights = pd.DataFrame(index=business_days_end_months,
                                     columns=tickers)

    with alive_bar(len(business_days_end_months)) as bar:
        for t in business_days_end_months:
            portfolio_weights.loc[t] = ew_weights(tickers, t)
            bar()

    return portfolio_weights