def uploadfile():
    filename = input(colored(" [system]: filename: ", "white"))
    time.sleep(1)
    if os.path.isfile(filename):
        print("")
        print(" [system]: size: ", str(os.path.getsize(filename)))
        asw = input(
            colored(
                " [system]: are u sure to send {} (y/n): ".format(filename),
                "white"))
        if asw == 'y':
            ################################
            # purely cosmetic progress animation; the real upload happens below
            suffix = '%(index)d/%(max)d [%(elapsed)d / %(eta)d / %(eta_td)s]'
            bar = PixelBar(" [system]: uploading ", suffix=suffix)
            for i in bar.iter(range(100)):
                sleep()
            ##################################
            with open(filename, 'rb') as payload:
                ftp.storbinary('STOR ' + filename, payload)
            time.sleep(1.2)
            print("")
            print(colored(" [system]: file uploaded", "white"))
            ftp.quit()
        else:
            print(" [system]: operation aborted")
            ftp.quit()
    else:
        print(" [system]: ERROR ! no such file named {} exists.".format(
            filename))
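
The progress loop above is purely cosmetic; the file is sent in a single storbinary() call afterwards. A minimal sketch of how the bar could track the actual transfer instead, using ftplib's storbinary callback (the ftp object and the "[system]" labels are assumptions carried over from the surrounding script):

import os
from progress.bar import PixelBar

def upload_with_progress(ftp, filename, blocksize=8192):
    # one bar tick per transmitted block, so progress reflects real bytes sent
    total_blocks = max(1, -(-os.path.getsize(filename) // blocksize))  # ceiling division
    bar = PixelBar(" [system]: uploading ", max=total_blocks)
    with open(filename, 'rb') as payload:
        ftp.storbinary('STOR ' + filename, payload, blocksize=blocksize,
                       callback=lambda _block: bar.next())
    bar.finish()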
Example #2
	def run(self):
		print('*' * 100)
		saveResultFile = None
		if self._config.get('result') is None or len(self._config.get('result').strip()) == 0:
			saveResultFile = os.path.join(os.getcwd(), 'result.txt')
		else:
			saveResultFile = self._config.get('result')
		if os.path.exists(saveResultFile):
			os.unlink(saveResultFile)

		with PixelBar('Delete progressing...') as bar:
			with open(saveResultFile , 'w') as fw:
				count = 0
				for d, s, fs in os.walk(self._rootPath):
					for f in fs:
						fullPath = os.path.join(d, f)
						ss = f.split('.')
						# str.split never returns None, so a truthiness check suffices
						if ss and self._contain_(ss[-1]):
							os.unlink(fullPath)
							# print('delete : {}'.format(fullPath))
							fw.write(fullPath + '\n')
							count += 1

						bar.next()

	
		print('*'*100)
		print('root : {}'.format(self._rootPath))
		print('save : {}'.format(saveResultFile))
		print('delete count : {}'.format(count))
		print('-'*100)
Example #3
def paint_spreadsheet(y_max, x_max, image, filename):

    wb = Workbook()
    ws1 = wb.active
    ws1.title = "Crafted With Love"

    with PixelBar("Painting Canvas", max=x_max) as bar:
        # go up to x_max/y_max inclusive so the last column/row is painted
        # and the bar actually reaches its max
        for col in range(1, x_max + 1):
            for row in range(1, y_max + 1):

                row_r = row * 3
                row_g = row_r + 1
                row_b = row_r + 2

                cell_r = ws1.cell(row_r, col)
                cell_g = ws1.cell(row_g, col)
                cell_b = ws1.cell(row_b, col)

                colors = image[row - 1][col - 1]

                cell_r.fill = PatternFill(start_color=rgb2hex(colors[2], 0, 0), fill_type="solid")
                cell_g.fill = PatternFill(start_color=rgb2hex(0, colors[1], 0), fill_type="solid")
                cell_b.fill = PatternFill(start_color=rgb2hex(0, 0, colors[0]), fill_type="solid")

                # ws1.row_dimensions[row_r].height = 3.75
                # ws1.row_dimensions[row_g].height = 3.75
                # ws1.row_dimensions[row_b].height = 3.75
                # ws1.column_dimensions[get_column_letter(col)].width = 2

            bar.next()
    wb.save(filename=f"{filename}.xlsx")
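
paint_spreadsheet depends on an rgb2hex helper that is not shown. A minimal sketch of what it plausibly does (an assumption; openpyxl accepts 6-digit RRGGBB strings for PatternFill colors and pads them to aRGB internally):

def rgb2hex(r, g, b):
    # Format three 0-255 channel values as an RRGGBB hex string.
    return '{:02X}{:02X}{:02X}'.format(int(r), int(g), int(b))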
Example #4
def greedy_dc(model: Model) -> (float, int, float, float):
    """
    Greedy thought:
        Sort sfcs by its computing resources consumption in the increasing
        order. For every sfc, sort each available configuration by its
        latency in the increasing order. Find the first path whose resources
        can fulfill the requirement of sfc. If no available path is found,
        reject the sfc!
    """
    print(">>> Greedy Start <<<")

    topo = copy.deepcopy(model.topo)
    sfcs = model.sfc_list[:]
    sfcs.sort(key=lambda x: x.computing_resources_sum)

    with Timer(verbose_msg='[Greedy] Elapsed time: {}'), PixelBar(
            "SFC placement") as bar:
        bar.max = len(sfcs)
        for sfc in sfcs:
            configuration = generate_configuration_greedy_dfs(topo, sfc)
            if configuration and is_configuration_valid(
                    topo, sfc, configuration):
                sfc.accepted_configuration = configuration
            bar.next()

    obj_val = objective_value(model)
    accept_sfc_number = len(model.get_accepted_sfc_list())
    latency = average_latency(model)
    print("Objective Value: {} ({}, {}, {})".format(obj_val, evaluate(model),
                                                    accept_sfc_number,
                                                    latency))
    return obj_val, accept_sfc_number, latency, model.compute_resource_utilization()
Example #5
def tst1():
    import time
    from progress.bar import PixelBar

    with PixelBar('Progressing...', max=5) as bar:
        for i in range(5):
            time.sleep(0.06)
            bar.next()
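
The same loop can also be written with the bar's iter() wrapper, which infers max from the iterable and calls next() and finish() for you (the style example #1 uses):

import time
from progress.bar import PixelBar

for i in PixelBar('Progressing...').iter(range(5)):
    time.sleep(0.06)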
Example #6
def PARC(model: Model):
    """
        1. Sort SFCs by its computing resources in the ascending order.
        2. For every sfc, compute its merged chain.
        3. Find the first path whose resources can fulfill the requirement of sfc.
        4. If so, accept the configuration
        5. Otherwise, try to find the path for the origin sfc
        6. If so, accept the configuration
        7. Otherwise, refuse the sfc
    """
    print(">>> Para Greedy Start <<<")

    topo = copy.deepcopy(model.topo)
    sfcs = model.sfc_list[:]
    sfcs.sort(key=lambda sfc: sfc.computing_resources_sum)

    with Timer(verbose_msg='[ParaGreedy] Elapsed time: {}'), PixelBar(
            "SFC placement") as bar:
        bar.max = len(sfcs)
        for sfc in sfcs:
            optimal_sfc = SFC(sfc.pa.opt_vnf_list[:], sfc.latency,
                              sfc.throughput, sfc.s, sfc.d, sfc.idx)

            optimal_config = generate_configuration_greedy_dfs(
                topo, optimal_sfc)
            if optimal_config:  # generate origin "place" from the merged "place"
                merged_vnf_index = 0
                place = [optimal_config.place[0]]
                for para in sfc.pa.opt_strategy:
                    if para == 0:
                        merged_vnf_index += 1
                    place.append(optimal_config.place[merged_vnf_index])

                configuration = Configuration(sfc, optimal_config.route, place,
                                              optimal_config.route_latency,
                                              optimal_config.idx)
                if is_configuration_valid(topo, sfc, configuration):
                    sfc.accepted_configuration = configuration

            if not sfc.accepted_configuration:
                configuration = generate_configuration_greedy_dfs(topo, sfc)
                if configuration and is_configuration_valid(
                        topo, sfc, configuration):
                    sfc.accepted_configuration = configuration
            # else reject

            bar.next()

    obj_val = objective_value(model)
    accept_sfc_number = len(model.get_accepted_sfc_list())
    latency = average_latency(model)
    print("Objective Value: {} ({}, {}, {}, {})".format(
        obj_val, evaluate(model), accept_sfc_number, latency,
        model.compute_resource_utilization()))
    return obj_val, accept_sfc_number, latency, model.compute_resource_utilization()
Example #7
def write_records(data, filename):
    series = data[0]
    target = data[1]
    writer = tf.io.TFRecordWriter(
        f'{hp.reanalysis_preprocess_out_dir}/{filename}')

    # data is a (series, target) tuple, so the bar counts example pairs, not len(data)
    bar = PixelBar(r'Generating', max=len(series), suffix='%(percent)d%%')
    for s, t in zip(series, target):
        example = tf.train.Example(features=tf.train.Features(
            feature={
                'input_sst': _bytes_feature(s['sst'].tobytes()),
                'input_uwind': _bytes_feature(s['uwind'].tobytes()),
                'input_vwind': _bytes_feature(s['vwind'].tobytes()),
                'input_sshg': _bytes_feature(s['sshg'].tobytes()),
                'input_thflx': _bytes_feature(s['thflx'].tobytes()),
                'output_sst': _bytes_feature(t['sst'].tobytes()),
                'output_uwind': _bytes_feature(t['uwind'].tobytes()),
                'output_vwind': _bytes_feature(t['vwind'].tobytes()),
                'output_sshg': _bytes_feature(t['sshg'].tobytes()),
                'output_thflx': _bytes_feature(t['thflx'].tobytes())
            }))
        writer.write(example.SerializeToString())
        bar.next()
    writer.close()
    bar.finish()
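
write_records assumes the usual TFRecord helper _bytes_feature; a sketch of the standard definition from the TensorFlow documentation pattern (an assumption here, since the snippet does not include it):

import tensorflow as tf

def _bytes_feature(value):
    # Wrap raw bytes in a tf.train.Feature holding a single-element BytesList.
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))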
Example #8
    def start_motion(self, error):
        GPIO.output(12, False)
        GPIO.output(16, True)
        self.set_mode(3)
        number_of_steps = int(math.ceil(self.exposure_time * self.step_per_sec))
        print("required steps are: ", number_of_steps)
        print("precalculated error is set to: ", error, "%")
        step = 0
        progress_bar = PixelBar('Progress bar', max=number_of_steps)
        start = time.time()
        while step < number_of_steps:
            for seq_step in range(len(self.sequence)):
                for pin in range(4):
                    GPIO.output(self.Pins[pin], self.sequence[seq_step][pin])
                time.sleep(self.delay - error / 100. * self.delay)
                step = step + 1
                progress_bar.next()
                if step >= number_of_steps:
                    break
        end = time.time()
        progress_bar.finish()
        self.duration = round(end - start, 8)
        print("total steps counted were: ", step)
        print("duration measured was: ", self.duration)
        print("error (%): ", 100. * round((self.exposure_time - self.duration) / self.exposure_time, 8))
        GPIO.output(12, True)
        GPIO.output(16, False)
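
start_motion iterates over a self.sequence pin table defined elsewhere. For a common 4-pin driver such as the ULN2003 on a 28BYJ-48 stepper, that table is typically the 8-row half-step sequence below (an assumption, not taken from the original):

# Hypothetical half-step sequence: each row is the on/off state of the four
# coil pins for one half-step; stepping through the rows rotates the motor.
HALF_STEP_SEQUENCE = [
    [1, 0, 0, 0],
    [1, 1, 0, 0],
    [0, 1, 0, 0],
    [0, 1, 1, 0],
    [0, 0, 1, 0],
    [0, 0, 1, 1],
    [0, 0, 0, 1],
    [1, 0, 0, 1],
]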
Example #9
def progressbar(epilog, current, max):
    global bar
    if current == 0:
        # size the bar to the terminal, leaving room for the counter and label
        width, height = os.get_terminal_size()
        width -= len(f"{max}/{max}")
        width -= len(epilog)
        width -= 2  # ears
        width -= 3  # number of spaces
        bar = PixelBar(epilog, max=max, width=width)

    bar.next()

    if current + 1 == max:
        bar.finish()
Example #10
def Pb8():
    from progress.bar import PixelBar
    import time

    bar = PixelBar('Progress bar 8', max=100)  # max is 100; adjust as needed

    for i in range(100):  # adjust this to match max
        bar.next()
        time.sleep(0.1)  # delay, adjustable; between 0.1 and 1 works best

    bar.finish()
Example #11
def memory_game(initial, stop):
    # Record the turn on which each starting number was spoken, except the
    # last one, which becomes the current "last spoken" number.
    seen = {}
    for index, value in enumerate(initial[:-1]):
        seen[value] = index

    last_spoken = initial[-1]
    # print(f"last spoken: {last_spoken}")
    with PixelBar() as bar:
        for index in range(len(initial) - 1, stop - 1):
            next_spoken = index - seen.get(last_spoken, index)
            seen[last_spoken] = index
            last_spoken = next_spoken
            if index % ((stop - 1) // 100) == 0:
                bar.next()

    return last_spoken
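
As a quick sanity check, the well-known "0, 3, 6" sample for this puzzle (Advent of Code 2020, day 15, not part of the original snippet) should produce 436 as the 2020th number spoken:

assert memory_game([0, 3, 6], 2020) == 436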
Example #12
def download_resources(resources: dict):
    bar = PixelBar("\U0001F4E5 Downloading resources", max=len(resources))
    for resource_url, resource_path in resources.items():
        try:
            path = os.path.abspath(resource_path)

            content = load(resource_url)
            log.debug(f"{resource_url} loaded")

            storage.save(content, path)
            log.debug(f"'{resource_path}' saved")
        except (errors.DownloadingError, errors.SavingError):
            pass
        finally:
            bar.next()
    bar.finish()
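
A hypothetical call, assuming load() returns the body of a URL and storage.save() writes bytes to disk (both helpers come from the surrounding project and are not shown):

download_resources({
    "https://example.com/assets/style.css": "page_files/style.css",
    "https://example.com/assets/logo.png": "page_files/logo.png",
})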
Example #13
def getName(accession_numbers):
    """Gets the organism name based on NCBI accession numbers.
    Reads will only be classified when the first two BLAST hits
    are identical. When the top two hits are different the
    reads will be registered as unclassified.

    Arguments:
        accession_numbers: List of NCBI accession numbers
        from the BLAST output.

    Raises:
        IndexError: Only one result found so no top 3 can be selected.

    Returns:
        Tuple of a Counter with all found organism names and the total
        number of entries (including unclassified ones).
    """
    acc_num = []
    identified = []
    count = 0
    for dummyread, numbers in accession_numbers.items():
        count += 1
        try:
            if numbers[0] == numbers[1]:
                acc_num.append(numbers[0])
            else:
                identified.append("unclassified")
        except IndexError:
            identified.append("unclassified")
    Entrez.email = '*****@*****.**'
    print()
    bar = PixelBar('Getting names:', max=len(acc_num), suffix='%(percent)d%%')
    sys.stdout.flush()
    for accession in acc_num:
        handle = Entrez.efetch(db="nucleotide",
                               id=accession,
                               rettype="gb",
                               retmode="text")
        result = handle.read().split('\n')
        for line in result:
            if 'ORGANISM' in line:
                identified.append(' '.join(line.split()[1:3]))
        bar.next()
    bar.finish()
    print()
    name_count = Counter(identified), len(identified)
    return name_count
Example #14
    def reset_tracker(self):
        GPIO.output(12, False)
        GPIO.output(18, True)
        self.set_mode(2)
        start = time.time()
        step = 0
        number_of_steps = math.ceil(self.exposure_time * self.step_per_sec)
        progress_bar = PixelBar('Progress bar', max=number_of_steps)
        while step < number_of_steps:
            for seq_step in reversed(range(len(self.sequence))):
                step = step + 1
                for pin in range(4):
                    GPIO.output(self.Pins[pin], self.sequence[seq_step][pin])
                time.sleep(self.delay_reset_position)
                progress_bar.next()
                if step >= number_of_steps:
                    break
        progress_bar.finish()
        GPIO.output(12, True)
        GPIO.output(18, False)
Example #15
def read_grib(path):
    print(f'Parsing parameter {path.split("/")[-1]}')
    bar = PixelBar(r'Parsing',
                   max=len(os.listdir(path)),
                   suffix='%(percent)d%%')

    year_record = {}
    for i in os.listdir(path):
        month = smonth
        year = int(str.split(i, '_')[2])
        if calendar.isleap(year):
            month = bmonth

        records = []
        grbs = pg.open(f'{path}/{i}')
        for grb in grbs:
            records.append(grb.values)

        month_record = []
        count = 0
        for j in range(12):
            total = None
            for k in range(count, count + (month[j] * 4)):
                if total is None:
                    total = records[k]
                else:
                    total += records[k]
                count += 1
            month_record.append(np.array(total / (month[j] * 4)))
        month_record = np.array(month_record)
        year_record[year] = month_record
        bar.next()
    bar.finish()
    print(year_record)

    reanalysis = []
    for i in range(1851, 2015):
        reanalysis.append(year_record[i])
    reanalysis = np.array(reanalysis)
    data = {path.split("/")[-1]: reanalysis}
    np.savez(f'{final}/{path.split("/")[-1]}.npz', **data)
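
read_grib references smonth and bmonth tables defined elsewhere. Given the month[j] * 4 indexing (apparently four records per day), they are plausibly days-per-month tables for standard and leap years (an assumption from context):

smonth = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]  # days per month, standard year
bmonth = [31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]  # days per month, leap year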
Example #16
def getAccessionNumbers(blastout):
    """Get the top 3 results from all reads in the BLAST output file.

    Arguments:
        blastout: BLAST output file.

    Return:
        Dictionary with read names and the top 3 accession numbers.
    """
    with open(blastout, "r") as bfile:
        total_lines = len(bfile.read().split('\n'))
    with open(blastout, "r") as blastfile:
        current_read_id = ""
        accession_numbers = dict()
        count = 0
        print()
        bar = PixelBar('Getting accession numbers:',
                       max=total_lines - 1,
                       suffix='%(percent)d%%')
        for line in blastfile:
            line = line.split('\t')
            read_id = line[0]
            accession = line[1]
            if read_id != current_read_id:
                count = 0
                current_read_id = read_id
            if count <= 2 and current_read_id != "":
                if read_id in accession_numbers:
                    accession_numbers[read_id].append(accession)
                else:
                    accession_numbers[read_id] = [accession]
                count += 1
            bar.next()
        bar.finish()
        print(str(len(accession_numbers.keys())) + " reads found.")
    return accession_numbers
Example #17
def download_content(page_content, page_url, files_dir):  # noqa: C901, WPS231
    """Download content and correct its link in the parsed page."""
    attr_list = {
        'link': 'href',
        'img': 'src',
        'script': 'src',
    }
    progress_bar = PixelBar('Processing', max=len(page_content))
    for element in page_content:
        progress_bar.next()
        attr = attr_list[element.name]
        try:
            content_url = element[attr]
        except KeyError:
            continue
        normalized_content_url = get_normalized_content_url(
            page_url,
            content_url,
        )
        if urlparse(normalized_content_url).netloc != urlparse(
                page_url).netloc:  # noqa: E501
            continue
        try:
            response, normalized_content_url = make_http_request(
                normalized_content_url, )
        except requests.HTTPError:
            logging.info(f'Failed to download {content_url} - HTTP Error')
            continue
        file_name = get_file_name(normalized_content_url)
        write_file(
            os.path.join(files_dir, file_name),
            response.content,
            binary=True,
        )
        new_link = f'{os.path.split(files_dir)[1]}/{file_name}'  # noqa: WPS237
        replace_content_link(element, attr, new_link)
    progress_bar.finish()
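
get_normalized_content_url is defined elsewhere in this project; a plausible minimal version built on urllib.parse.urljoin (an assumption, not the project's actual code):

from urllib.parse import urljoin

def get_normalized_content_url(page_url, content_url):
    # Resolve a possibly relative asset URL against the page it came from.
    return urljoin(page_url, content_url)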
Example #18
path = 'TXT/' + '_'.join([
    pdf_file[0:5], pdf_file[6:8], pdf_file[9:11], pdf_file[12:16],
    'analitico_composicao_'
])

origem_dados = ';'.join([pdf_file[0:5], pdf_file[6:8], pdf_file[9:16]])

composicao = open(''.join([path, 'dado_basico.txt']), 'w', encoding="utf-8")
apropriacao = open(''.join([path, 'apropriacao.txt']), 'w', encoding="utf-8")

with open(pdf_file, "rb") as f:
    pdf = pdftotext.PDF(f)
    num_pages = len(pdf)

with PixelBar('Writing TXT',
              max=num_pages,
              suffix='%(index)d/%(max)d - %(percent).1f%% - %(eta)ds') as bar:
    for pagina in pdf:

        obj_composicao = Apropriacao()

        obj_regex = RegexApropriacao(pagina)

        for i in range(len(obj_regex.linhas) - 3):
            regex_fic = obj_regex.obter_regex_fic(i)
            regex_producao = obj_regex.obter_regex_producao(i)
            regex_codigo = obj_regex.obter_regex_codigo(i)
            regex_equipamento = obj_regex.obter_regex_equipamento(i)
            regex_mao_obra = obj_regex.obter_regex_mao_de_obra(i)
            regex_tempo_fixo = obj_regex.obter_regex_tempo_fixo(i)
            regex_transporte_rodoviario = obj_regex.obter_regex_transporte_rodoviario(
Example #19
from subprocess import call
from progress.bar import PixelBar
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import sys
mpl.rcParams['text.usetex'] = True

angles = np.linspace(0, 6.28, 360)

try:
    if sys.argv[1] == 'count':
        open("param_file", "w").close()
        with PixelBar('First calculation...', max=len(angles)) as bar:
            for angle in angles:
                call(["./a.out", str(1000), str(angle)])
                bar.next()
except IndexError:
    pass

y = np.array([])
with open("param_file", "r") as ifile:
    for line in ifile:
        y = np.append(y, float(line.split()[1]))

plt.scatter(angles, y)

plt.xlabel(r'$\varphi, rad$')
plt.ylabel(r'$v_{fin}$')
plt.yticks(np.linspace(np.min(y), np.max(y), 10))
Example #20
url = 'http://phisix-api4.appspot.com/stocks/'

while True:

    print()
    a = input(u' Stock/Ticker Code: ')  # add u for it to work
    print()

    if a == 'exit':
        break
    elif a == 'clear':
        os.system('clear')
        continue  # reload the page
    b = '.json'

    with PixelBar(' Fetching Data ...') as bar:  # bar progress
        for i in range(100):
            sleep(0.03)
            bar.next()

    try:
        final = url + a + b
        pse = requests.get(final).json()
        # print(pse)

        print()  #datetime
        pse_asof = pse['as_of']
        d = dateutil.parser.parse(pse_asof)
    print(' Date:  ' + d.strftime('%m/%d/%Y'))  # this works nicely
        oras = (d.strftime('%H:%M'))
        oras1 = datetime.datetime.strptime(oras, '%H:%M').strftime(
Example #21
import time
from progress.bar import PixelBar

mylist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]

bar = PixelBar('PROGRESS', max=len(mylist))

for item in mylist:
    bar.next()
    time.sleep(1)

bar.finish()
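
PixelBar accepts the same keyword options as the other bars in the progress package; here is a variant of the loop above with a custom suffix built from the package's documented format fields (index, max, percent, eta):

import time
from progress.bar import PixelBar

bar = PixelBar('PROGRESS', max=10,
               suffix='%(index)d/%(max)d - %(percent)d%% - eta %(eta)ds')

for item in range(10):
    bar.next()
    time.sleep(0.5)

bar.finish()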
Example #22
physical_devices = tf.config.list_physical_devices("GPU")
tf.config.experimental.set_memory_growth(physical_devices[0], enable=True)

# Window size or the sequence length
N_STEPS = 70  # 150
# Lookup step, 1 is the next day
LOOKUP_STEP = 1  # 7
TICKER = "ACB"  # "PYPL"

# set seed, so we can get the same results after rerunning several times
np.random.seed(314)
tf.random.set_seed(314)
random.seed(314)

bar = PixelBar()

## TODO: This needs to take market open and closing times into account, i.e. running at 12:01am will not reflect the day it is on
date_string = datetime.now().strftime("%Y-%m-%d")

PORTFOLIO = [
    "ACB",
    "AVID",
    "HMMJ.TO",
    "BTB-UN.TO",
    "NWH-UN.TO",
    "OGI",
    "PYPL",
    "WELL.TO",
    "BTC-CAD",
    "ETH-CAD",
Example #23
from classes import (
    RegexComposicao,
    Composicao,
    RegexArquivo,
    Arquivo,
)

##### Extracting data from the PDF file

pdf_file_onerado = "SICRO/GO 10-2020 Relatório Sintético de Composições de Custos.pdf"

with open(pdf_file_onerado, "rb") as f:
    cadastro_onerado = pdftotext.PDF(f)
    num_pages_onerado = len(cadastro_onerado)

with PixelBar('Extracting data from the PDF',
              max=num_pages_onerado,
              suffix='%(index)d/%(max)d - %(percent).1f%% - %(eta)ds') as bar:

    lista_composicao = list()

    for pagina in cadastro_onerado:
        linhas_pagina_atual_pdf = pagina.split('\n')
        linhas_pagina_atual_pdf.pop(-2)

        for linha in linhas_pagina_atual_pdf:

            obj_regex = RegexComposicao(linha)

            if (obj_regex.cabecalho is None) and (
                    obj_regex.principal
                    is not None) and (len(obj_regex.principal.groups()) > 4):
Example #24
def askOptions():
    thestr = f'''{Fore.CYAN}    MMMMMMMMMMNXKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKXWMMMMMMMMMMMMMM
    MMMMMMMMMNo'........................................................................,kWMMMMMMMMMMMMM
    MMMMMMMMMX:  ..;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;,,,,,,.  .oWMMMMMMMMMMMMM
    MMMMMMMMMX:  .{Fore.GREEN}oNWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWNNXXXXX0{Style.RESET_ALL}:  .{Fore.CYAN}oWMMMMMMMMMMMMM
    MMMMMMMMMX:  .{Fore.GREEN}oWMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMWNNNNNK{Style.RESET_ALL}:. .{Fore.CYAN}oWMMMMMMMMMMMMM
    MMMMMMMMMX:  .{Fore.GREEN}oWMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMWNNNNNK{Style.RESET_ALL}:. .{Fore.CYAN}oWMMMMMMMMMMMMM
    MMMMMMMMMX:  .{Fore.GREEN}oWMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMWKkkkkkkkxxddddo{Style.RESET_ALL}'  .{Fore.CYAN}:xkkkkkkkKWMMMM
    MMMMMMMMMX:  .{Fore.GREEN}oWMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMK|{Style.RESET_ALL}{Fore.CYAN};-----------'00'-----------;KMMMM
    MMMMMMMMMX:  .{Fore.GREEN}oWMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM0|{Style.RESET_ALL}  {Fore.YELLOW}|Oddddddddxxxxxddddddddc|{Style.RESET_ALL} {Fore.CYAN}0MMMM
    MMMMMMMMMX:  .{Fore.GREEN}oWMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM0|{Style.RESET_ALL}  {Fore.YELLOW}|OMMMMMMMMMMMMMMMMMMMMMO|{Style.RESET_ALL} {Fore.CYAN}0MMMM
    MMMMMMMMMX:  .{Fore.GREEN}oWMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM0|{Style.RESET_ALL}  {Fore.YELLOW}|OMMMMMMMMMMMMMMMMMMMMMO|{Style.RESET_ALL} {Fore.CYAN}0MMMM
    MMMMMMMMMX:  .{Fore.GREEN}oWMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM0|{Style.RESET_ALL}  {Fore.YELLOW}|OMMMMMMMMMMMMMMMMMMMMMO|{Style.RESET_ALL} {Fore.CYAN}0MMMM
    MMMMMMMMMX:  .{Fore.GREEN}oWMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM0|{Style.RESET_ALL}  {Fore.YELLOW}|OMMMMMMMMMMMMMMMMMMMMMO|{Style.RESET_ALL} {Fore.CYAN}0MMMM
    MMMMMMMMMX:  .{Fore.GREEN}oWMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM0|{Style.RESET_ALL}  {Fore.YELLOW}|OMMMMMMMMMMMMMMMMMMMMMO|{Style.RESET_ALL} {Fore.CYAN}0MMMM
    MMMMWXKKKO;  .{Fore.GREEN}c0KKKKNMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM0|{Style.RESET_ALL}  {Fore.YELLOW}|OMMMMMMMMMMMMMMMMMMMMMO|{Style.RESET_ALL} {Fore.CYAN}0MMMM
    MMMMK:',;,',;,,,;,',{Fore.GREEN}kMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM0|{Style.RESET_ALL}  {Fore.YELLOW}|OMMMMMMMMMMMMMMMMMMMMMO|{Style.RESET_ALL} {Fore.CYAN}0MMMM
    MMMM0{Fore.YELLOW}|l0XKXXXXKKXKo|{Style.RESET_ALL}{Fore.GREEN}xWMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM0|{Style.RESET_ALL}  {Fore.YELLOW}|OMMMMMMMMMMMMMMMMMMMMMO|{Style.RESET_ALL} {Fore.CYAN}0MMMM
    MMMM0{Fore.YELLOW}|lNMMMMMMMMMWx|{Style.RESET_ALL}{Fore.GREEN}xWMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM0|{Style.RESET_ALL}  {Fore.YELLOW}|OMMMMMMMMMMMMMMMMMMMWWk|{Style.RESET_ALL} {Fore.CYAN}0MMMM
    MMMM0{Fore.YELLOW}|lNMMMMMMMMMMx|{Style.RESET_ALL}{Fore.GREEN}xWMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMWO|{Style.RESET_ALL}  {Fore.YELLOW}|OMMMMMMMMMMMMMMMMMMWWWk|{Style.RESET_ALL} {Fore.CYAN}0MMMM
    MMMM0{Fore.YELLOW}|lNMMMMMMMMMMx|{Style.RESET_ALL}{Fore.GREEN}xWMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMWWNO|{Style.RESET_ALL}  {Fore.YELLOW}|OMMMMMMMMMMMMMMMMWWNNNk|{Style.RESET_ALL} {Fore.CYAN}0MMMM
    MMMM0{Fore.YELLOW}|lNMMMMMMMMMWx|{Style.RESET_ALL}{Fore.GREEN}xWMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMWWNNNO|{Style.RESET_ALL}  {Fore.YELLOW}|OMMMMMMMMMMMMMMWWNNNNNk|{Style.RESET_ALL} {Fore.CYAN}0MMMM
    MMMM0{Fore.YELLOW}|lNMMMMMMMMMWd|{Style.RESET_ALL}{Fore.GREEN}ckOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOkkxxxxxo|{Style.RESET_ALL}  {Fore.YELLOW}|OMMMMMMMMMMMWWWNNNNNNNk|{Style.RESET_ALL} {Fore.CYAN}0MMMM
    MMMM0{Fore.YELLOW}|lNMMMMMMMMWNd|{Style.RESET_ALL}{Fore.CYAN}..............................................   {Fore.YELLOW}|OMMMMMMWWWWWNNNNNNNNNNk|{Style.RESET_ALL} {Fore.CYAN}0MMMM
    MMMM0{Fore.YELLOW}|lNMMMMMMWWWNd|{Style.RESET_ALL}{Fore.CYAN},clllllllllllllllllllc.         .,cllllllllll:.  {Fore.YELLOW}|kWWWWWWNNNNNNNNNNNNNNNk|{Style.RESET_ALL} {Fore.CYAN}0MMMM
    MMMM0{Fore.YELLOW}|lNMMMWWWNNNNd|{Style.RESET_ALL}{Fore.CYAN}xWMMMMMMMMMMWXKKKKKKKO;         .c0KKKKKKKNWM0,  {Fore.YELLOW}|kNNNNNNNNNNNNNNNNNNNNNk|{Style.RESET_ALL} {Fore.CYAN}0MMMM
    MMMM0{Fore.YELLOW}|lXWWWNNNNNNNd|{Style.RESET_ALL}{Fore.CYAN}xWMMMMMMMMMMKc.........          .........oNM0,  {Fore.YELLOW}|xXXXXXXXXXXXXXXXXXXXXXx|{Style.RESET_ALL} {Fore.CYAN}0MMMM
    MMMM0{Fore.YELLOW}|lKKKKKKKKKKKo|{Style.RESET_ALL}{Fore.CYAN}xWMMMMMMMMMMXl,,,,,,,,,,,,,,,,,,,,,,,,,,,,dNM0,  __________(0)___________ ,0MMMM
    MMMM0,.....''''(0)''''......xMMMMMMMMMMMWNNNNNNNNNNNNNNNNNNNNNNNNNNNNNWMM0;  MMMMMMMMM.:::.MMMMMMMMMM ,0MMMM
    MMMMXxooooodxdooooodKMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMNOooooooooooooodoooooooooooookNMMMM'''
    while True:
        os.system('cls')
        print(thestr.center(10))
        device = str(input(f'{Style.RESET_ALL}Select your Option\n1. Search Laptops\n2. Search Mobile\nYour Choice:\t'))
        if(device == '1'):            
            option = str(input(f'{Style.RESET_ALL}Select your Option\n1. Search Laptop Model\n2. Recommend A Laptop\nYour Choice:\t'))
            if(option == '1'):
                mobile = input('What is the Device Brand/Name?')
                url = "https://www.91mobiles.com/search_page.php?q=" + mobile + "&type=all&utm_source=autosuggest"
                url = ' '.join(url.split())
                op = webdriver.ChromeOptions()
                op.add_argument('headless')
                driver = webdriver.Chrome('./chromedriver',options=op) 
                driver.get(url) 
                time.sleep(3)
                html = driver.page_source
                searchSoup =  bs(html, 'html.parser')
                texts = []
                links = []
                for i in searchSoup.findAll('ul',{'class':'product_listing'}):
                    for j in i.findAll('li',{'class':'finder_snipet_wrap'}):
                        for k in j.findAll('div',{'class':'content_info'}):
                            for link in k.findAll('div',{'class':'pro_grid_name'}):
                                temp = link.a['href']
                                temp = temp.replace(' ','')
                                temp = temp.replace('\n','')
                                links.append('https://91mobiles.com'+temp)
                                tname = link.getText()
                                tname = tname.replace('  ','')
                                tname = tname.replace('\n','')
                                texts.append(tname)
                for i in range(0,len(texts)):
                    print(str(i+1)+"\t: \t"+texts[i])
                number = 0
                number = input('Enter the number of the mobile you want to search?')
                searchUrl = links[int(number)-1]
                searchUrl = ' '.join(searchUrl.split())
                with PixelBar('Processing...', max=30) as bar:
                    for xys in range(30):
                        time.sleep(0.1)
                        bar.next()

                fetchAndPrintData(searchUrl)
                
            elif(option == '2'):
                os.system('cls')
                choice = 1
                if(choice == 1):
                    theRange = int(input(f'What\'s Your Range?\n{bcolors.WARNING}1. Below Rs. 20000\n2. Rs. 20000 to Rs. 30000\n3. Rs.30000 to Rs. 40000\n4. Rs.40000 to Rs. 50000\n5. Rs. 50000 to Rs. 60000\n6. Above Rs. 50000\n7. Top 10 Laptops{bcolors.ENDC}\nYour Choice?\t'))
                    rangeUrl = 'https://www.91mobiles.com/'+ rangeOpts[theRange-1]
                    print(rangeUrl)
                    with PixelBar('Processing...', max=30) as bar:
                        for xys in range(30):
                            time.sleep(0.1)
                            bar.next()

                    scrapeRanges(rangeUrl)
                else:
                    print('Invalid Option')
                    input('Retry.......Enter Any Key.......')                    
                    os.system('cls')
            else:
                print('Invalid Option......')
                input('Retry.......Enter Any Key.......')
                os.system('cls')
        elif(device == '2'):
            mobile = input('What is the Device Brand/Name?')
            url = "https://www.91mobiles.com/search_page.php?q=" + mobile + "&type=all&utm_source=autosuggest"
            url = ' '.join(url.split())
            op = webdriver.ChromeOptions()
            op.add_argument('headless')
            driver = webdriver.Chrome('./chromedriver',options=op) 
            driver.get(url) 
            time.sleep(3)
            html = driver.page_source
            searchSoup =  bs(html, 'html.parser')
            texts = []
            links = []
            for i in searchSoup.findAll('ul',{'class':'product_listing'}):
                for j in i.findAll('li',{'class':'finder_snipet_wrap'}):
                    for k in j.findAll('div',{'class':'content_info'}):
                        for link in k.findAll('div',{'class':'pro_grid_name'}):
                            temp = link.a['href']
                            temp = temp.replace(' ','')
                            temp = temp.replace('\n','')
                            links.append('https://91mobiles.com'+temp)
                            tname = link.getText()
                            tname = tname.replace('  ','')
                            tname = tname.replace('\n','')
                            texts.append(tname)
            for i in range(0,len(texts)):
                print(str(i+1)+"\t: \t"+texts[i])
            number = 0
            number = input('Enter the number of the mobile you want to search?')
            searchUrl = links[int(number)-1]
            searchUrl = ' '.join(searchUrl.split())
            with PixelBar('Processing...', max=30) as bar:
                for xys in range(30):
                    time.sleep(0.1)
                    bar.next()

            fetchAndPrintData(searchUrl)
        else:
            input('Invalid Option.......press any key')
            os.system('cls')
Example #25
from classes import (
                        Arquivo,
                        Material,
                        RegexMaterial,
                        RegexArquivo,
                    )

##### Extracting data from the PDF file

pdf_file = "SICRO/GO 10-2020 Relatório Sintético de Materiais.pdf"

with open(pdf_file, "rb") as f:
    cadastro = pdftotext.PDF(f)
    num_pages = len(cadastro)

with PixelBar('Extracting data from the PDF', max=num_pages, suffix='%(index)d/%(max)d - %(percent).1f%% - %(eta)ds') as bar:

    lista_material = list()

    for pagina in cadastro:
        linhas_pagina_atual_pdf = pagina.split('\n') 
        linhas_pagina_atual_pdf.pop(-2)

        for linha in linhas_pagina_atual_pdf:

            obj_regex = RegexMaterial(linha)

            if (obj_regex.cabecalho is None) and (obj_regex.principal is not None) and (len(obj_regex.principal.groups()) == 4):

                obj_material = Material(obj_regex.principal)
                lista_material.append(obj_material)
Example #26
if __name__ == '__main__':
  if len(argv)<5:
    exit(REDC+"Usage: {} ip port count time".format(argv[0]))
  socketList = []
  logger.info('count: {} timer: {}'.format(count, timer))

  bar = Counter(GREENC+'Creating sockets: '+YELLOWC, max=count)
  for _ in range(count):
    try: soc=init(ip, port)
    except error: break
    socketList.append(soc)
    bar.next()

  print()
  while True:
    sendbar = PixelBar(GREYC+'Sending keep-alive Headers'+REDC, max=timer)
    logger.info('Sending keep-alive Headers')
    
    for soc in socketList:
      try: soc.send('X-a {}\r\n'.format(randint(1,6000)).encode('utf-8'))
      except error: socketList.remove(soc)

    for _ in range(count - len(socketList)):
      try:
        soc=init(ip, port)
        if soc: socketList.append(soc);logger.error('Socket Died')
      except error: break
    
    for t in range(timer):
      sleep(1); sendbar.next()
    sendbar.finish()  # close this round's bar before building the next one
Example #27
"""Easy progress reporting for Python"""

import time
from progress.bar import IncrementalBar, ChargingBar, FillingSquaresBar, Bar, PixelBar, ShadyBar

MAX = 50
MYLIST = range(MAX)


def iterbar(bar, mylist):
    for i in mylist:
        bar.next()
        time.sleep(0.04)
    bar.finish()


if __name__ == '__main__':
    bar = Bar('Bar', max=MAX)
    filling_squares_bar = FillingSquaresBar('FillingSquaresBar', max=MAX)
    charging_bar = ChargingBar('ChargingBar', max=MAX)
    incremental_bar = IncrementalBar('IncrementalBar', max=MAX)
    pixel_bar = PixelBar('PixelBar', max=MAX)
    shady_bar = ShadyBar('ShadyBar', max=MAX)

    iterbar(bar, MYLIST)
    iterbar(filling_squares_bar, MYLIST)
    iterbar(charging_bar, MYLIST)
    iterbar(incremental_bar, MYLIST)
    iterbar(pixel_bar, MYLIST)
    iterbar(shady_bar, MYLIST)
Example #28
from classes import (
                        Equipamento,
                        RegexEquipamento,  # needed by the loop below
                        RegexArquivo,
                        Arquivo,
                    )

##### Opening the onerado (charges-included) PDF file

pdf_file_onerado = "SICRO/GO 10-2020 Relatório Sintético de Equipamentos.pdf"

with open(pdf_file_onerado, "rb") as f_onerado:
    cadastro_onerado = pdftotext.PDF(f_onerado)
    num_pages_onerado = len(cadastro_onerado)

##### Extracting data from the onerado PDF

with PixelBar('Extracting data from the onerado PDF', max=num_pages_onerado, suffix='%(index)d/%(max)d - %(percent).1f%% - %(eta)ds') as bar:

    ###### Populating the list with Equipamento instances

    lista_equipamento = list()

    for pagina in cadastro_onerado:
        linhas_pagina_atual_pdf_file_onerado = pagina.split('\n')
        linhas_pagina_atual_pdf_file_onerado.pop(-2)

        for linha in linhas_pagina_atual_pdf_file_onerado:

            obj_regex_onerado = RegexEquipamento(linha)

            if (obj_regex_onerado.cabecalho is None) and (obj_regex_onerado.principal is not None):
Example #29
def main() -> None:

    # This function is too large; let's break it up

    # Get all of the letters of the alphabet and the number of page indexes that they have
    temp: dict = {}
    with PixelBar("Getting page numbers for dictionary keys... ",
                  max=27) as pb:
        unicodeLetter: str
        i: int
        for i in range(96, 123):
            if i == 96:
                unicodeLetter = "0"
            else:
                unicodeLetter = chr(i)
            temp[unicodeLetter] = getLetterPageCount(unicodeLetter)
            pb.next()

    # For each letter return all of the words starting with that letter and write it to JSON
    letter: str
    for letter in temp.keys():

        data: dict = {}
        data["letter"] = letter
        data["indexURLs"] = {}

        with PixelBar(
                f"Getting words listed under the dictionary index: {letter}... ",
                max=temp[letter],
        ) as pb:
            i: int
            for i in range(temp[letter]):
                wordList: list
                indexURL: str = f"https://www.merriam-webster.com/browse/dictionary/{letter}/{i + 1}"

                html: BeautifulSoup = getHTML(url=indexURL)
                wordList = getWords(html=html)

                data["indexURLs"][indexURL] = {"numberOfWords": 0, "words": []}
                data["indexURLs"][indexURL]["numberOfWords"] = len(wordList)

                word: str
                for word in wordList:
                    data["indexURLs"][indexURL]["words"].append({
                        "word":
                        word,
                        "type": [],
                        "definitions": [],
                        "wordURL":
                        f"https://www.merriam-webster.com/dictionary/{word}".
                        replace(" ", "+"),
                    })
                pb.next()

        writeToJSON(filename=f"output/{letter}.json", store=data)

    # Load data from JSON file and get all of the word types associated with the words

    i: int
    unicodeLetter: str
    for i in range(96, 123):
        if i == 96:
            unicodeLetter = "0"
        else:
            unicodeLetter = chr(i)

        jsonFile: dict = loadJSON(filename=f"output/{unicodeLetter}.json")

        wordList: list
        index: str
        for index in jsonFile["indexURLs"].keys():
            urlIndexData: dict = jsonFile["indexURLs"][index]

            word: dict
            for word in urlIndexData["words"]:
                wordTypeList: list = []

                wordHTML: BeautifulSoup = getHTML(url=word["wordURL"])

                wordTypeResultSet: ResultSet = wordHTML.find_all(
                    name="a", attrs={"class": "important-blue-link"})

                # TODO: This for loop could be more efficient
                wordTypeTag: Tag
                for wordTypeTag in wordTypeResultSet:
                    wordTypeList.append(wordTypeTag.text)

                    try:
                        subType: str = wordTypeTag.find(
                            name="a", attrs={
                                "class": "important-blue-link"
                            }).text

                        wordTypeList.append(subType)
                    except AttributeError:
                        pass

                word["type"] = wordTypeList

        writeToJSON(filename=f"output/{unicodeLetter}.json", store=jsonFile)
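
main() leans on writeToJSON and loadJSON helpers that are not shown; a minimal sketch of what they plausibly look like (hypothetical, not the project's actual code):

import json

def writeToJSON(filename: str, store: dict) -> None:
    # Dump the scraped data structure to a JSON file.
    with open(filename, "w", encoding="utf-8") as fp:
        json.dump(store, fp, indent=4)

def loadJSON(filename: str) -> dict:
    # Read a previously written JSON file back into a dict.
    with open(filename, "r", encoding="utf-8") as fp:
        return json.load(fp)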
Example #30
            module='[detection]',
            version='[v 1.0]',
            service_account_json_file='/Users/'
                                      'admin/Downloads/savvy-etching-254922-e6fda8dabd2c.json')
    except ImportError:
        pass


def sleep():
    t = 0.01
    t += t * random.uniform(-0.1, 0.1)  # Add some variance
    time.sleep(t)


suffix = '%(index)d/%(max)d [%(elapsed)d / %(eta)d / %(eta_td)s]'
bar = PixelBar('PROCESSING', suffix=suffix)
for i in bar.iter(range(150)):
    sleep()


"""VIDEO DETECTION LINE'S"""


cap = cv2.VideoCapture("test2.mp4")
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:  # end of stream; avoid passing None to canny()
        break
    canny_image = canny(frame)
    cropped_image = region_of_interest(canny_image)
    # slope coefficient
    lines = cv2.HoughLinesP(cropped_image, 2, np.pi / 180, 100, np.array([()]),
                            minLineLength=40,