Example No. 1
def fetch_cities_json():
    click.echo(hue.yellow('Fetching cities json...'))
    json_url = 'https://raw.githubusercontent.com/michaeltcoelho/'\
        'Municipios-Brasileiros/master/municipios_brasileiros.json'
    http = urllib3.PoolManager()
    response = http.request('GET', json_url)
    data = json.loads(response.data.decode('utf-8-sig'))
    click.echo(hue.yellow('Cities json fetched...'))
    return data
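
The snippet above leans on click, huepy (imported as hue), urllib3 and the stdlib json module; a minimal, hypothetical setup for running it on its own (not part of the original source) could look like this:

# Hypothetical imports/driver assumed by fetch_cities_json (illustrative only).
import json

import click
import huepy as hue
import urllib3

if __name__ == '__main__':
    cities = fetch_cities_json()
    click.echo(hue.green('Loaded %d cities' % len(cities)))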
Example No. 2
 def output_stats(self):
     print("\t| {} {}".format(green("Player:"), self.name))
     print("\t|\t {} {}, {} {}".format(yellow("Gold:"), self.gold,
                                       orange("Infamy:"), self.infamy))
     for unit in self.units:
         print("\t|\t {}".format(unit))
     print("\t| {} {}".format(red("Opponent:"), self.opponent.name))
     print("\t|\t {} {}, {} {}".format(yellow("Gold:"), self.opponent.gold,
                                       orange("Infamy:"),
                                       self.opponent.infamy))
     for unit in self.opponent.units:
         print("\t|\t {}".format(unit))
Example No. 3
def size(path, json=False, verbose=False):
    '''
    check the size of a given path or directory

    \b
    examples:
      luz disk size /tmp
      luz disk size /home --json (print output in JSON)
      luz disk size /home -v (print names and size of all subfolders)
    '''
    #click.echo('\n'+cyan('%s disk size\n' % path))


    payload = get_dir_size(json, verbose, path)

    if payload == 'error':
        return

    if json:
        if verbose:
            click.echo(color(payload, bg='black', fg='white'))
        else:
            try:
                # the boolean `json` parameter shadows the stdlib json module,
                # so import the module under a different name here
                import json as jsonlib
                click.echo(jsonlib.dumps(payload))
                click.echo(yellow(jsonlib.dumps(payload['total'])))
            except AttributeError as e:
                click.echo(red('error generating json, %s' % str(e)))
                click.echo(yellow('total (kb):  ' + str(payload['total']['kb'])))
    else:
        if verbose:
            for d in payload['dirs']:
                try:

                    click.echo(color(str(d), fg='yellow') + color('  {:,} bytes'.format(payload['dirs'][d]), fg='white')) 
                    #click.echo(white('{:,} bytes'.format(payload['dirs'][d])))
                    #click.echo(yellow(str(d) + ':  ' + str(payload['dirs'][d]) + ' b'))
                except UnicodeEncodeError as e:
                    click.echo(red('error displaying sub directories %s' % str(e)))

            #click.echo(yellow('total (b):  ' + str(payload['total']['b'])))
            #click.echo(yellow('total (kb):  ' + str(payload['total']['kb'])))
           # click.echo(yellow('total (mb):  ' + str(payload['total']['mb'])))
            #click.echo(yellow('total (gb):  ' + str(payload['total']['gb'])))
        else:
            total_b = str(payload['total']['b'])
            total_kb = str(payload['total']['kb'])
            total_mb = str(payload['total']['mb'])
            total_gb = str(payload['total']['gb'])
            click.echo(yellow("disk space: {0}").format(path))
            horizontal()
            click.echo(white("{0} B\n{1} KB\n{2} MB\n{3} GB\n".format(total_b, total_kb, total_mb, total_gb)))
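
For reference, size() assumes get_dir_size returns either the string 'error' or a nested dict; the shape below is a hypothetical sketch inferred from the lookups above, not the original helper's output:

# Hypothetical payload shape inferred from the lookups in size().
payload = {
    'dirs': {'/tmp/cache': 2048, '/tmp/logs': 1024},   # per-subfolder size in bytes
    'total': {'b': 3072, 'kb': 3.0, 'mb': 0.003, 'gb': 0.0},
}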
Example No. 4
def colorize_option(chave, valor):
    '''
    Format a menu option, highlighting its selector based on the key type:
    an integer index or the first matching letter of the label.
    '''
    if isinstance(chave, int):
        selector = yellow(' [') + bold(red('%s')) + yellow('] ')
        suffix = yellow('%s')
        return selector % chave + suffix % valor
    if isinstance(chave, str):
        pos = valor.lower().find(chave)
        prefix, radical, suffix = valor.partition(valor[pos])
        if prefix:
            prefix = red('%s' % prefix)
        radical = yellow('[') + bold(red('%s' % radical)) + yellow(']')
        return ' %s%s%s\n' % (prefix, radical, suffix)
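
A hedged usage sketch, assuming yellow, red and bold come from a hue-style terminal color helper:

# Hypothetical usage of colorize_option (not part of the original).
print(colorize_option(1, 'Start game'))   # renders " [1] Start game" with the index highlighted
print(colorize_option('q', 'Quit'))       # renders " [Q]uit" with the matching letter highlighted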
Example No. 5
def get_net(args):

    load_pretrained = args.net_init == 'pretrained' and args.checkpoint == ""
    if load_pretrained:
        print(yellow('Loading a net, pretrained on ImageNet1k.'))

    model = SE_ResNeXt101FT(num_classes=340, pretrained=load_pretrained)

    if not args.load_only_extractor:
        return model

    model.load_state_dict(
        torch.load(
            'extensions/qd/data/experiments/se_resnext_n01z3/checkpoints/se_resnext101_n.pth'
        )['state_dict'])

    # Extractor
    feature_extractor = model.features

    # Construct

    num_classes = [int(x) for x in args.num_classes.split(',')]

    predictor = nn.Sequential(
        nn.Dropout(args.dropout_p),
        MultiHead(in_features=2048, num_classes=num_classes))

    model = BaseModel(feature_extractor, predictor)

    return model
Example No. 6
def _optimize_file(path):

    tinify.key = get_api_key()

    with yaspin(text="Optimizing", color="cyan") as spinner:

        try:

            before_size = getsize(path)

            source = tinify.from_file(path)
            source.to_file(path)

            after_size = getsize(path)

            # compare
            if before_size == after_size:
                before = before_size
                after = after_size

            else:
                before = huepy.blue(before_size)
                after = huepy.yellow(after_size)

            log_msg = "{0} {1} → {2}"
            msg = log_msg.format(os.path.basename(path), before, after)
            log = logger.info(msg, flag=SUCCESS)

        except (ClientError, ServerError, ConnectionError):

            log = logger.error(os.path.basename(path), flag=FAILURE)

        finally:
            spinner.write(log)
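
_optimize_file relies on the official tinify client plus project helpers (get_api_key, a logger that accepts a flag keyword, and the SUCCESS/FAILURE flags); a hedged sketch of the surrounding imports, with the helper bodies guessed rather than taken from the original, might be:

# Hypothetical imports/helpers assumed by _optimize_file.
import os
from os.path import getsize

import huepy
import tinify
from tinify import ClientError, ConnectionError, ServerError
from yaspin import yaspin

SUCCESS, FAILURE = 'success', 'failure'   # placeholder flag values

def get_api_key():
    # e.g. read the TinyPNG key from an environment variable
    return os.environ['TINIFY_API_KEY']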
Example No. 7
def get_net(args):

    load_pretrained = args.net_init == 'pretrained' and args.checkpoint == ""
    if load_pretrained:
        print(yellow('Loading a net, pretrained on ImageNet1k.'))

    model = sys.modules[__name__].__dict__[args.arch](
        num_classes=1000, pretrained='imagenet' if load_pretrained else None)

    num_classes = [int(x) for x in args.num_classes.split(',')]

    # Extractor
    feature_extractor = nn.Sequential(model.layer0, model.layer1, model.layer2,
                                      model.layer3, model.layer4)

    # Predictor
    predictor = MultiHead(in_features=model.last_linear.in_features,
                          num_classes=num_classes)
    # if args.dropout_p > 0:
    predictor = nn.Sequential(nn.Dropout(args.dropout_p), predictor)

    # Construct
    model = BaseModel(feature_extractor, predictor)

    return model
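
All the get_net variants in these examples read the same handful of fields off args; a hypothetical argparse setup covering the ones used above (field names taken from the code, defaults invented) might be:

# Hypothetical CLI arguments assumed by the get_net examples.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--net_init', default='pretrained')   # 'pretrained' triggers ImageNet weights
parser.add_argument('--checkpoint', default='')
parser.add_argument('--arch', default='resnet50')
parser.add_argument('--num_classes', default='340')       # comma-separated, one entry per head
parser.add_argument('--dropout_p', type=float, default=0.5)
args = parser.parse_args([])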
Example No. 8
    def get_net(args):

        load_pretrained = args.net_init == 'pretrained'
        if load_pretrained:
            print(yellow(' - Loading a net, pretrained on ImageNet1k.'))

        model = resnext.__dict__[args.arch](num_classes=1000,
                                            pretrained=load_pretrained)

        # Extractor
        feature_extractor = nn.Sequential(model.conv1, model.bn1, model.relu,
                                          model.maxpool, model.layer1,
                                          model.layer2, model.layer3,
                                          model.layer4)

        # Predictor
        predictor = MultiHead(
            in_features=model.fc.in_features,
            num_classes=[int(x) for x in args.num_classes.split(',')])
        if args.dropout_p > 0:
            predictor = nn.Sequential(nn.Dropout(args.dropout_p), predictor)

        # Construct
        model = BaseModel(feature_extractor, predictor, args.pooling)

        return model
Example No. 9
 def __str__(self):
     unit_name = "Ship" if self.ship_health else "Crew"
     health = self.ship_health or self.crew_health
     return "{} #{} ({} {}, {} {}, {} {})".format(
         unit_name, self.id, yellow("Gold:"), self.gold, cyan("Health:"),
         health, lightpurple("Crew:"), self.crew
     )
Example No. 10
def get_net(args):

    load_pretrained = args.net_init == 'pretrained' and args.checkpoint == ""
    if load_pretrained:
        print(yellow('Loading a net, pretrained on ImageNet1k.'))

    model = polynet(num_classes=1000,
                    pretrained='imagenet' if load_pretrained else None)

    feature_extractor = nn.Sequential(
        model.stem,
        model.stage_a,
        model.reduction_a,
        model.stage_b,
        model.reduction_b,
        model.stage_c,
    )

    num_classes = [int(x) for x in args.num_classes.split(',')]

    # Predictor
    predictor = MultiHead(in_features=model.last_linear.in_features,
                          num_classes=num_classes)
    predictor = nn.Sequential(nn.Dropout(args.dropout_p), predictor)

    # Construct
    model = BaseModel(feature_extractor, predictor)

    return model
Example No. 11
 def iterdata(self):
     """Yield each data from the output queue."""
     try:
         while True:  # not_empty is a Condition (always truthy); rely on queue.Empty instead
             data = self.output_queue.get_nowait()
             yield data
     except queue.Empty:
         print(hue.yellow('No more data available :)'))
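
A self-contained, hypothetical illustration of the same drain-until-empty pattern (the Worker class is invented for the sketch):

# Hypothetical illustration of the queue-draining pattern above.
import queue

import huepy as hue

class Worker:
    def __init__(self):
        self.output_queue = queue.Queue()

    def iterdata(self):
        """Yield each item until the queue is exhausted."""
        try:
            while True:
                yield self.output_queue.get_nowait()
        except queue.Empty:
            print(hue.yellow('No more data available :)'))

w = Worker()
for i in range(3):
    w.output_queue.put(i)
print(list(w.iterdata()))  # [0, 1, 2]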
Example No. 12
    def process(self, data):
        print(hue.bold(hue.green("\n------ {} ------".format(datetime.now()))))
        print(
            hue.yellow("Full packet data: ") +
            hue.italic(binascii.hexlify(data)))

        # Checks if the 802.15.4 packet is valid
        if makeFCS(data[:-2]) != data[-2:]:
            print(hue.bad("Invalid packet"))
            return

        # Parses 802.15.4 packet
        packet = Dot15d4FCS(data)
        packet.show()

        if packet.fcf_frametype == 2:  # ACK
            return

        # Tries to match received packet with a known link
        # configuration
        matched = False
        for link in self.link_configs:
            if packet.dest_panid != link.dest_panid:
                continue
            if packet.fcf_srcaddrmode == 3:  # Long addressing mode
                if packet.src_addr != link.source.get_long_address():
                    continue
                if packet.dest_addr != link.destination.get_long_address():
                    continue
            else:
                if packet.src_addr != link.source.get_short_address():
                    continue
                if packet.dest_addr != link.destination.get_short_address():
                    continue
            # record the matched link (applies to both addressing modes)
            source = link.source
            destination = link.destination
            key = link.key
            matched = True

        if not matched:
            if packet.fcf_srcaddrmode == 3:
                source = Rf4ceNode(packet.src_addr, None)
                destination = Rf4ceNode(packet.dest_addr, None)
            else:
                source = Rf4ceNode(None, packet.src_addr)
                destination = Rf4ceNode(None, packet.dest_addr)
            key = None

        # Process RF4CE payload
        frame = Rf4ceFrame()
        try:
            rf4ce_payload = bytes(packet[3].fields["load"])
            frame.parse_from_string(rf4ce_payload, source, destination, key)
        except Rf4ceException as e:
            print(hue.bad("Cannot parse RF4CE frame: {}".format(e)))
            return
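
The matching loop above implies that each entry of self.link_configs exposes dest_panid, source, destination and key; a hypothetical sketch of such an object (not the original class) could be:

# Hypothetical link configuration shape implied by the matching loop.
from dataclasses import dataclass

@dataclass
class LinkConfig:
    dest_panid: int
    source: 'Rf4ceNode'        # exposes get_long_address() / get_short_address()
    destination: 'Rf4ceNode'
    key: bytes                 # RF4CE link key used when parsing the frame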
Example No. 13
 def get_climatempo_city_link(self, url, page_source):
     print(hue.blue(f'Scraping Google page at {url}'))
     page = BeautifulSoup(page_source, 'html.parser')
     css_selector = 'a[href*="/url?q=https://www.climatempo.com.br/climatologia/"]'
     climatempo_link_tag = page.select_one(css_selector)
     if climatempo_link_tag is None:
         print(hue.yellow(f'Climatempo link not found on Google at {url}'))
         return ''
     climatempo_link = self.get_climatempo_link_from_tag(
         climatempo_link_tag)
     print(
         hue.green(
             f'Climatempo link {climatempo_link} scraped on google at {url}'
         ))
     return climatempo_link
Example No. 14
def get_net(args):

    load_pretrained = args.net_init == 'pretrained'
    if load_pretrained:
        print(yellow('Loading a net, pretrained on ImageNet1k.'))

    model = MobileNetV2(pretrained=load_pretrained,
                        return_features=args.return_features)

    model.classifier = nn.Sequential(
        nn.Dropout(args.dropout_p),
        nn.Linear(model.last_channel, args.num_outputs),
    )

    return model
Example No. 15
def get_net(args):

    load_pretrained = args.net_init == 'pretrained' and args.checkpoint == ""
    if load_pretrained:
        print(yellow('Loading a net, pretrained on ImageNet1k.'))

    model = se_resnext50(num_classes=1000, pretrained=load_pretrained)

    # if not args.load_only_extractor:
    # return model

    # model.load_state_dict(torch.load('extensions/qd/data/experiments/se_resnext_n01z3/checkpoints/se_resnext101_n.pth')['state_dict'])

    if args.num_input_channels != 3:
        # if args.num_input_channels % 3 != 0:
        #     assert False

        conv1_ = model.conv1
        model.conv1 = torch.nn.Conv2d(args.num_input_channels,
                                      conv1_.out_channels,
                                      kernel_size=conv1_.kernel_size,
                                      stride=conv1_.stride,
                                      padding=conv1_.padding,
                                      bias=False)

        for i in range(int(args.num_input_channels / 3)):
            model.conv1.weight.data[:, i * 3:(i + 1) *
                                    3] = conv1_.weight.data / (int(
                                        args.num_input_channels / 3))

        if args.num_input_channels % 3 > 0:
            model.conv1.weight.data[:, -(args.num_input_channels %
                                         3):] = conv1_.weight.data[:, -(
                                             args.num_input_channels % 3):]

    feature_extractor = nn.Sequential(model.conv1, model.bn1, model.relu,
                                      model.maxpool, model.layer1,
                                      model.layer2, model.layer3, model.layer4)

    num_classes = [int(x) for x in args.num_classes.split(',')]

    predictor = nn.Sequential(
        nn.Dropout(args.dropout_p),
        MultiHead(in_features=2048, num_classes=num_classes))

    model = BaseModel(feature_extractor, predictor)

    return model
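
The conv1 surgery above tiles the pretrained 3-channel weights across the new input channels, scaling them so the expected activation stays roughly constant; a standalone hedged sketch of the same idea on a bare Conv2d:

# Illustrative sketch of the weight-tiling trick used above (not from the original repo).
import torch.nn as nn

num_input_channels = 6
old = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
new = nn.Conv2d(num_input_channels, 64, kernel_size=7, stride=2, padding=3, bias=False)

groups = num_input_channels // 3
for i in range(groups):
    # each 3-channel slice receives a scaled copy of the pretrained kernel
    new.weight.data[:, i * 3:(i + 1) * 3] = old.weight.data / groups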
Example No. 16
    def get_net(args):

        load_pretrained = args.net_init == 'pretrained'
        if load_pretrained:
            print(yellow(' - Loading a net, pretrained on ImageNet1k.'))

        model = linknet.LinkNet(num_input_channels=args.num_input_channels,
                                num_output_channels=args.num_output_channels,
                                depth=args.resnet_depth,
                                pretrained=load_pretrained)

        if args.freeze_basenet:
            model.freeze_basenet()
        else:
            model.unfreeze_basenet()

        return model
Example No. 17
    def get_net(args):

        load_pretrained = args.net_init == 'pretrained'
        if load_pretrained:
            print(yellow(' - Loading a net, pretrained on ImageNet1k.'))

        model = inception_v4.InceptionV4(num_classes=1001,
                                         pretrained=load_pretrained)

        num_classes = [int(x) for x in args.num_classes.split(',')]

        predictor = MultiHead(in_features=1536, num_classes=num_classes)
        predictor = nn.Sequential(nn.Dropout(args.dropout_p), predictor)

        # Construct
        model = BaseModel(model.features, predictor, args.pooling)

        return model
Example No. 18
    def get_net(args):

        load_pretrained = args.net_init == 'pretrained'
        if load_pretrained:
            print(yellow(' - Loading a net, pretrained on ImageNet1k.'))

        resnet = torchvision.models.__dict__[args.arch](
            pretrained=load_pretrained)

        # If an image has a different number of channels
        if args.num_input_channels != 3:
            if args.num_input_channels % 3 != 0:
                assert False

            conv1_ = resnet.conv1
            resnet.conv1 = torch.nn.Conv2d(args.num_input_channels,
                                           conv1_.out_channels,
                                           kernel_size=conv1_.kernel_size,
                                           stride=conv1_.stride,
                                           padding=conv1_.padding,
                                           bias=False)

            for i in range(int(args.num_input_channels / 3)):
                resnet.conv1.weight.data[:, i * 3:(i + 1) *
                                         3] = conv1_.weight.data / 3

        # Extractor
        feature_extractor = nn.Sequential(resnet.conv1, resnet.bn1,
                                          resnet.relu, resnet.maxpool,
                                          resnet.layer1, resnet.layer2,
                                          resnet.layer3, resnet.layer4)

        # Predictor
        predictor = MultiHead(
            in_features=resnet.fc.in_features,
            num_classes=[int(x) for x in args.num_classes.split(',')])
        if args.dropout_p > 0:
            predictor = nn.Sequential(nn.Dropout(args.dropout_p), predictor)

        # Construct
        model = BaseModel(feature_extractor, predictor)

        return model
Example No. 19
def disp_exp(options):
    if all(i.startswith(('1', '2', '3', '4', '5', '6')) for i in options.exp):
        options.include = options.exp
    for b in get_recommendations(options):
        if b[2]:
            profileServer = 'Level ' + str(b[2]) + ' Server'
        else:
            profileServer = 'N/A'
        if b[3]:
            profileWorkstation = 'Level ' + str(b[3]) + ' Workstation'
        else:
            profileWorkstation = 'N/A'
        exp = '{:<9}|{:<10}|{:<14}|{:<19}|'.format(
            b[0], 'Scored' if b[1] else 'Not Scored', profileServer, profileWorkstation) + b[4]
        if b[1]:
            print(bold(green(exp)))
        else:
            print(bold(yellow(exp)))
    exit()
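
From the indexing above, each row yielded by get_recommendations is expected to look roughly like the tuple below (a hand-written, hypothetical example, not real benchmark output):

# Hypothetical recommendation row inferred from the indexing in disp_exp:
# (number, scored, server profile level, workstation profile level, description)
b = ('1.1.1', True, 1, 2, 'Example recommendation text')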
Example No. 20
    def destruct_response(cls, response: ty.Dict[str, ty.Any]) -> VKAPIError:
        """Разбирает ответ от вк про некорректный API запрос
        на части и инициализирует сам объект исключения

        Args:
          response: ty.Dict[str:
          ty.Any]:
          response: ty.Dict[str:

        Returns:

        """
        status_code = response["error"].pop("error_code")
        description = response["error"].pop("error_msg")
        request_params = response["error"].pop("request_params")
        request_params = {
            item["key"]: item["value"]
            for item in request_params
        }

        pretty_exception_text = (huepy.red(f"\n[{status_code}]") +
                                 f" {description}\n\n" +
                                 huepy.grey("Request params:"))

        for key, value in request_params.items():
            key = huepy.yellow(key)
            value = huepy.cyan(value)
            pretty_exception_text += f"\n{key} = {value}"

        # If any extra fields remain
        if response["error"]:
            pretty_exception_text += (
                "\n\n" + huepy.info("There are some extra fields:\n") +
                str(response["error"]))

        return cls(
            pretty_exception_text=pretty_exception_text,
            description=description,
            status_code=status_code,
            request_params=request_params,
            extra_fields=response["error"],
        )
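
For context, destruct_response expects a raw VK error payload shaped like the dict below (a hand-written, hedged example, not captured API output):

# Hypothetical VK error response accepted by destruct_response.
response = {
    "error": {
        "error_code": 5,
        "error_msg": "User authorization failed",
        "request_params": [
            {"key": "method", "value": "users.get"},
            {"key": "v", "value": "5.131"},
        ],
        # any keys left after the pops end up in extra_fields
    }
}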
Example No. 21
def get_net(args):

    load_pretrained = args.net_init == 'pretrained' and args.checkpoint == ""
    if load_pretrained:
        print(yellow('Loading a net, pretrained on ImageNet1k.'))

    feature_extractor = ResidualNet('ImageNet',
                                    args.depth,
                                    1000,
                                    args.att_type,
                                    pretrained=load_pretrained)

    num_classes = [int(x) for x in args.num_classes.split(',')]

    predictor = MultiHead(in_features=feature_extractor.fc.in_features,
                          num_classes=num_classes)
    predictor = nn.Sequential(nn.Dropout(args.dropout_p), predictor)

    feature_extractor.fc = None

    model = BaseModel(feature_extractor, predictor)

    return model
Example No. 22
def get_net(args):
    
    load_pretrained = args.net_init == 'pretrained' and args.checkpoint == ""
    if load_pretrained:
        print(yellow('Loading a net, pretrained on ImageNet1k.'))

    model = inceptionresnetv2(num_classes=1000,
                              pretrained='imagenet' if load_pretrained else None)

    feature_extractor = nn.Sequential(
        model.conv2d_1a,
        model.conv2d_2a,
        model.conv2d_2b,
        model.maxpool_3a,
        model.conv2d_3b,
        model.conv2d_4a,
        model.maxpool_5a,
        model.mixed_5b,
        model.repeat,
        model.mixed_6a,
        model.repeat_1,
        model.mixed_7a,
        model.repeat_2,
        model.block8,
        model.conv2d_7b,
    )

    num_classes = [int(x) for x in args.num_classes.split(',')]

    # Predictor
    predictor = MultiHead(in_features=model.last_linear.in_features,
                          num_classes=num_classes)
    predictor = nn.Sequential(nn.Dropout(args.dropout_p), predictor)

    # Construct
    model = BaseModel(feature_extractor, predictor)

    return model
Example No. 23
def horizontal():
    ''' prints a horizontal line separator '''
    click.echo(yellow('----------------------'))
Example No. 24
with open(file_path, 'w', newline='') as csvfile:
    csvwriter = writer(csvfile, dialect='excel')
    csvwriter.writerow(
        ['Recommendation Number', 'Message', 'Result', 'Explanation', 'Time'])

length = len(recommendations)
score = 0
passed = 0

if options.verbose:
    # printing the legend for verbose output
    print('Done. Here\'s the legend for the test results:')
    print(bold(green('Green  Text indicates tests that have PASSED')))
    print(bold(red('Red    Text indicates tests that have FAILED')))
    if options.score is None:
        print(bold(yellow('Yellow Text indicates tests that are  NOT SCORED')))
    print('\n\nPerforming ' + str(length) + ' tests now...\n')
else:
    print('Done. Performing ' + str(length) + ' tests now...\n\n')

# progressbar format
bar_format = u'{count:03d}/{total:03d}{percentage:6.1f}%|{bar}| ' + \
    bold(green('pass')) + u':{count_0:{len_total}d} ' + \
    bold(red('fail')) + u':{count_1:{len_total}d} ' + \
    bold(yellow('chek')) + u':{count_2:{len_total}d} ' + \
    u'[{elapsed}<{eta}, {rate:.1f}{unit_pad}{unit}/s]'
passd = manager.counter(total=length,
                        unit='tests',
                        color='white',
                        bar_format=bar_format)
faild = passd.add_subcounter('white')
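
The manager.counter(...) / add_subcounter(...) calls suggest the progress bar comes from the enlighten library; a minimal hedged setup for the manager object would be:

# Hypothetical setup for the progress bar above, assuming the enlighten library.
import enlighten

manager = enlighten.get_manager()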
Example No. 25
    def get_net(self, args, train_dataloader, criterion):

        model = self.net_wrapper.get_net(args)
        model = model.to(args.device)

        if args.fp16:
            model = FP16Model(model)

        # Load checkpoint
        if args.checkpoint is not None and args.checkpoint != Path('.'):

            checkpoint_path = self.get_checkpoint_path(args)
            model = self.load_state(args, checkpoint_path, model)
        else:
            model = self.init_weights(args, model, train_dataloader)

        if not args.fancy_stuff:
            return model

        # Some other stuff
        if hasattr(model, 'feature_extractor'):
            value = not args.fix_feature_extractor
            set_param_grad(model.feature_extractor,
                           value=value,
                           set_eval_mode=False)

        if args.freeze_bn:
            print(yellow(' - Freezing BN'))

            def freeze_bn(m):
                if isinstance(m, torch.nn.BatchNorm2d):

                    m.training = False

                    def nop(*args, **kwargs):
                        pass

                    m.train = nop

            model.apply(freeze_bn)

        if args.merge_model_and_loss:
            model = ModelAndLoss(model, criterion)

        if (args.use_all_gpus and args.device == 'cuda'
                and torch.cuda.device_count() > 1):
            print(yellow(' - Using all GPUs!'))

            model = torch.nn.DataParallel(model)

        if args.bn_momentum != -1:

            def freeze_bn1(m):
                if isinstance(m, torch.nn.BatchNorm2d):

                    m.momentum = args.bn_momentum

                    # def nop(*args, **kwargs):
                    #     pass

                    # m.train = nop

            model.apply(freeze_bn1)

        return model
Example No. 26
# Process stages
if 'stages' not in vars(args): 
    args.stages = {'main': {}}


if args.stage is not None:

    if args.stage != 'none':
        args.stages = {args.stage: args.stages[args.stage]}
    else:
        args.stages = {'main': {}}

# Run 
for stage_num, (stage_name, stage_args_) in enumerate(args.stages.items()):

    print(yellow(f' - Starting stage "{stage_name}"!'))

    stage_args = munchify({**vars(args), **stage_args_ })

    if hasattr(model, 'module') and hasattr(model.module, 'feature_extractor'):
        # toggle feature-extractor gradients according to stage_args.fix_feature_extractor
        set_param_grad(model.module.feature_extractor, value=not stage_args.fix_feature_extractor, set_eval_mode=False)
        # set_param_grad(model.module.feature_extractor[-1], value=True, set_eval_mode=False)

    optimizer = get_optimizer(stage_args, model)
    scheduler = get_scheduler(stage_args, optimizer)

    if args.fp16:
        import apex 
Example No. 27
# Load saver
# saver = get_saver('DummySaver')

# def set_param_grad(model, value, set_eval_mode=True):
#     for param in model.parameters():
#         param.requires_grad = value

#     if set_eval_mode:
#         model.eval()

# Run
for stage_num, (stage_name,
                stage_args_) in enumerate(config['stages'].items()):

    print(yellow(f' - Starting stage "{stage_name}"!'))

    stage_args = munchify({**stage_args_, **config['train_args']})

    optimizers = train_factory.make_optimizers(stage_args, pipeline)
    schedulers = train_factory.make_schedulers(stage_args, optimizers,
                                               pipeline)

    print('schedulers', schedulers.keys())
    train_factory.set_trainable(stage_args, pipeline)
    criterions = train_factory.make_criterions(stage_args)

    print('criterions', criterions.keys())

    if stage_args.parallel:
        pipeline = nn.DataParallel(pipeline)
Example No. 28
	print('este script só é portado para Linux')
	exit()

resultado = None
DadosArray = []

func = (lambda x: x * '-=-')
translate = Translator(service_urls = [
	'translate.google.com'
])

print(yellow('''_       __
  _ _  __   _   _ _| | __ / | __  __
 / ` |/ _ \ / _ \ / _` | |/ _ \ | / _ \ \/ /
| (| | () | () | (| | |  _/  _| () >  <
 \_, |\_/ \_/ \, ||\__||  \__//\_\
 |_/             |_/
 # Telegram: @Foxxer_SA
 # Se inscreve lá @AcervoHackerBR!!!
 # GitHub: @foxx3r'''))
print(func(10))
print('\033[1;41mEvite utilizar acentos, ainda estamos trabalhando nisso!!!\033[0;0m')
sleep(1)
print(func(10))

while True:
	opcao = int(input('1) detectar idioma\n2) traduzir\n3) listar linguagens\n4) ler dados de um arquivo\n5) ler um conjunto\n6) sair\n-> '))
	print(func(10))
	
	if opcao == 1:
		detectar = str(input('digite algo -> '))