Пример #1
0
def synthesis_json(user_id, org_audio_path, start_transcript,
                   end_transcript):
    """Write the synthesis_list JSON used for voice synthesis.

    Builds one ``[source_path, speaker, output_name]`` entry per transcript
    segment between *start_transcript* and *end_transcript* (inclusive) and
    dumps the list to ``datasets/english/synthesis_list_<user_id>.json``.

    :param user_id: speaker id; also used to name converted files
    :param org_audio_path: path of the source TED audio to convert
    :param start_transcript: string of the form ``"<ted_id>_<start_index>"``
    :param end_transcript: string of the form ``"<ted_id>_<end_index>"``
    """
    user_name = str(user_id)        # speaker name derived from user_id
    path_dir = str(org_audio_path)  # path of the TED audio being converted
    ted_id = int(start_transcript.split('_')[0])
    start_point = int(start_transcript.split('_')[1])
    end_point = int(end_transcript.split('_')[1])
    file_list = [str(ted_id) + '_' + str(j)
                 for j in range(start_point, end_point + 1)]
    filename = 'datasets/english/synthesis_list_' + str(user_id) + '.json'

    yml = bios.read('./config/convert.yaml')
    yml['synthesis_list'] = filename
    # NOTE(review): yml is modified but never written back to disk —
    # confirm whether the updated config should be persisted.

    synthesis = []
    for i in file_list:
        fileplace = path_dir[2:] + i     # segment path (leading './' stripped)
        save_name = user_name + '_' + i  # name of the converted file
        entry = [str(fileplace), user_name, save_name]
        if entry not in synthesis:       # avoid duplicate entries
            synthesis.append(entry)

    # Original code first wrote str(synthesis) to the same path with a
    # never-context-managed file handle, then immediately overwrote it with
    # json.dump; the redundant first write is removed.
    with open('./' + filename, 'w', encoding='utf-8') as make_file:
        json.dump(synthesis, make_file, indent="\t")

    print('synthesis success')
Пример #2
0
    def extract_answers(self):
        """Export every answer under ``responses`` in the bot's domain.yml
        to ``domain_answers.csv``.

        Each CSV row is: response key, ``custom_<index>``, answer type,
        answer payload. HTML answers export their ``"text"`` field; when an
        answer lacks the field matching its type, the whole dict is written.
        """
        domain_file_path = "../../bot/domain.yml"
        domain_file_dict = bios.read(domain_file_path)

        with open('domain_answers.csv', mode='w',
                  newline='') as domain_answers_file:
            domain_answers_writer = csv.writer(domain_answers_file,
                                               delimiter=',',
                                               quotechar='"',
                                               quoting=csv.QUOTE_MINIMAL)

            for key in domain_file_dict["responses"].keys():
                custom_key = 0  # index of the custom block within this key
                for custom in domain_file_dict["responses"][key]:
                    for answer in custom["custom"]["answers"]:
                        answer_type = answer["type"]

                        # "html" answers keep their payload under "text".
                        if answer_type == "html":
                            answer_export = "text"
                        else:
                            answer_export = answer_type

                        try:
                            domain_answers_writer.writerow([
                                key, "custom_{}".format(custom_key),
                                answer_type, answer[answer_export]
                            ])
                        except KeyError:
                            # No field named after the type: export the
                            # whole answer dict instead. (Was a bare
                            # except; narrowed to the error that occurs.)
                            domain_answers_writer.writerow([
                                key, "custom_{}".format(custom_key),
                                answer_type, answer
                            ])
                    custom_key += 1
Пример #3
0
def retrieve_swag():
    """Parse a swagger definition and report fuzzable GET parameters.

    Reads the YAML produced by ``swagger_jacker()``, prints the API
    endpoint, the method count, and every ``{param}`` placeholder found in
    the method paths, followed by a total count of fuzzable parameters.
    Any error is printed rather than raised.
    """
    try:
        # presumably writes Yaml_out.yaml as a side effect — TODO confirm;
        # the returned location was previously bound but never used.
        swagger_jacker()
        swagger_jacked = bios.read('Yaml_out.yaml')
        total_fuzzable_gets = 0
        api_servers, api_methods = key_play(swagger_jacked)
        print("-" * 25)
        print("Api Endpoint Location :" + api_servers)
        print("-" * 25)
        print("Api Methods" + "\n")
        print("Method Count: " + str(len(api_methods)))
        print("-" * 25)
        for method in api_methods:
            if method:
                matchy = re.findall('{(.+?)}', method)
                if matchy:
                    for matches in matchy:
                        print("Fuzzable Parameters Located")
                        print(matches)
                        total_fuzzable_gets += 1
                else:
                    # BUG FIX: original printed the literal string "method";
                    # print the method path itself (matches the intent of
                    # the commented-out print(method) lines).
                    print(method)
        print("-" * 25)
        print("Total Fuzzable Get Parameters :" + str(total_fuzzable_gets))

    except Exception as yamlError:
        print(yamlError)
Пример #4
0
def load_yml(input_path):
    """Load a YAML file and return its parsed contents.

    :param input_path: input file path
    :return: parsed YAML content (typically a dict)
    """
    # Renamed from `input`, which shadowed the builtin.
    data = bios.read(input_path, file_type='yaml')
    return data
Пример #5
0
def get_function_env(function_env_path):
    """Build a docker-style ``-e KEY=VALUE`` argument string from a YAML
    environment file.

    :param function_env_path: path to a YAML mapping of env names to values
    :return: concatenated ``-e KEY=VALUE `` pairs (trailing space included,
             matching the original output), or '' when the file is missing
    """
    if not os.path.exists(function_env_path):
        return ''

    env = bios.read(function_env_path, file_type='yaml')
    # join instead of quadratic string +=; iterate items() rather than
    # re-indexing the dict per key.
    return ''.join('-e {}={} '.format(key, value)
                   for key, value in env.items())
Пример #6
0
    def __init__(self):
        """Load this app's settings from ``api_key_file`` under ``api_name``
        and extract the secret key, failing loudly when either is missing.

        :raises Exception: when the settings file cannot be read or the
            secret key is absent/empty.
        """
        try:
            self.app_settings = bios.read(api_key_file)[api_name]
            print(self.app_settings)
        except Exception as err:
            # Was a bare except that discarded the cause; chain it so the
            # real error (missing file, bad YAML, missing key) is visible.
            raise Exception(f"can't read settings from {api_key_file}") from err

        try:
            self.secret_key = self.app_settings["secret_key"]
            if self.secret_key is None or self.secret_key == '':
                raise Exception("Secret not set in config ")
        except Exception as err:
            raise Exception(f"can't read secret key from {api_key_file}") from err
Пример #7
0
def get_configset(directory, file, collection=None):
    """Read ``<directory>/<file>`` as YAML.

    Returns the whole parsed config, or just ``config[collection]`` when a
    specific collection (other than 'ALL') is requested. Any failure is
    logged and reported as an ``{'error': ...}`` dict.
    """
    try:
        parsed = bios.read(directory + os.sep + file, file_type='yaml')
        if not parsed:
            result = {'error': 'There is no configset. Set collection.yml on /configset directory'}
        elif collection and collection != 'ALL':
            result = parsed.get(collection, None)
        else:
            result = parsed
    except Exception as e:
        result = {'error': str(e)}
        log.error(result)
    return result
Пример #8
0
    def setUp(self):
        # Test fixture: start a BrowserMob proxy and launch a Chrome
        # instance routed through it, configured from config.yaml.
        self.config = bios.read('config.yaml')
        # BrowserMob install dir comes from config['dailyGrammar']['browsermobPath'].
        self.server = Server(
            self.config.get('dailyGrammar').get('browsermobPath') +
            "bin/browsermob-proxy")
        self.server.start()
        self.proxy = self.server.create_proxy()
        options = webdriver.ChromeOptions()
        # Route all browser traffic through the local BrowserMob proxy.
        options.add_argument('--proxy-server={host}:{port}'.format(
            host='localhost', port=self.proxy.port))
        # Proxy-injected certs would otherwise trigger TLS errors.
        options.add_argument("--ignore-certificate-errors")
        options.add_argument("--window-size=1920,1080")
        if self.config.get('headless'):
            options.add_argument('--headless')
            print("Chrome is running in headless mode")

        self.driver = webdriver.Chrome(
            executable_path=ChromeDriverManager().install(), options=options)
Пример #9
0
def __load_local(name, data):
    """Merge locally-defined overrides from file *name* into *data*.

    Each top-level key in the local file may be a dash-separated list of
    target keys ("a-b" applies to both "a" and "b"). Existing targets are
    merged field by field via ``__update``; missing targets are inserted
    wholesale. A missing file is silently ignored.

    :param name: path of the local data file (read with bios)
    :param data: dict merged into in place; also returned for convenience
    :raises Exception: re-raises any error other than FileNotFoundError
    """
    try:
        local_data = bios.read(name)
        for key in local_data:
            for target in key.strip().split("-"):
                # The file may declare a key with no data; only merge when
                # there is actually something to merge.
                if local_data[key]:
                    if target in data:
                        for field in local_data[key]:
                            __update(data[target], field, local_data[key][field], key)
                    else:
                        data[target] = local_data[key]
    except FileNotFoundError:
        pass
    except Exception as ex:
        print("*** Erro carregando dados locais : " + name)
        print(ex)
        # Bare raise keeps the original traceback (the previous
        # `raise ex` re-raised from here, obscuring the origin).
        raise
    return data
Пример #10
0
 async def read(self) -> Dict[str, GoveeLearnedInfo]:
     """get the last saved learning information from disk, database, ... and return it."""
     learned_info = {}
     try:
         device_dict = bios.read(self._filename)
         # BUG FIX: this was a set comprehension, which produced a
         # Set[GoveeLearnedInfo] despite the Dict[str, GoveeLearnedInfo]
         # annotation; build the device -> info mapping instead.
         learned_info = {
             device_str: dacite.from_dict(data_class=GoveeLearnedInfo,
                                          data=device_dict[device_str])
             for device_str in device_dict
         }
         _LOGGER.info(
             "Loaded learning information from %s.",
             self._filename,
         )
     except Exception as ex:
         # Missing/unreadable file on first run is expected; fall through
         # with the empty default.
         _LOGGER.warning(
             "Unable to load goove learned config from %s: %s. This is normal on first use.",
             self._filename,
             ex,
         )
     return learned_info
Пример #11
0
    def collect(self):
        """Prometheus-style collector: run each configured Analytics v4
        report request and yield the resulting gauge metrics."""
        self._gauges = {}
        analytics = self._initialize_analyticsreporting()
        print("[", datetime.now(), "]",
              "Authorized to talk with Analytics v4 API")
        reports = helper.yamlToReportRequests(bios.read(CONFIG_FILE))
        for report in reports:
            print("[", datetime.now(), "]", "[REPORT REQUEST]", report)
            # segmentsList is consumed locally by _get_metrics and must be
            # stripped from the payload sent to the API.
            segmentsList = report['segmentsList']
            del report['segmentsList']

            response = self._requestWithExponentialBackoff(analytics, report)
            print("[", datetime.now(), "]", "RESPONSE OBTAINED")
            self._get_metrics(
                response,
                report.get('reportRequests')[0].get('viewId'),
                report.get('reportRequests')[0].get('dateRanges')[0],
                segmentsList)

            # NOTE(review): yielding inside the report loop re-yields gauges
            # accumulated from earlier reports on every iteration — confirm
            # this duplication is intended.
            for metric in self._gauges:
                yield self._gauges[metric]
Пример #12
0
def services(request):
    """Django view: list the current user's services; on POST, create a new
    service from the uploaded YAML file and store its parsed and graph
    representations as pickled, base64-encoded strings."""
    service_list = Service.objects.filter(owner_id=request.user.id)

    if request.method == 'POST':
        service_name = request.POST['service-name']
        service = Service(owner=request.user, service_name=service_name,
                          service_file=request.FILES['service-file'])
        # First save persists the uploaded file so service_file.path exists.
        service.save()

        yaml_file = bios.read(service.service_file.path, file_type='yaml')

        yaml_file, nodes, edges = generate_visjs_graph(yaml_file)

        # NOTE(review): both b64 fields pickle the post-generate_visjs_graph
        # yaml_file — confirm "orig" was not meant to hold the pre-processing
        # version. Also note pickle round-trips of user-uploaded content are
        # only safe while the data never leaves this application.
        service.service_file_b64 = str(base64.b64encode(
            pickle.dumps(yaml_file)), "utf-8")
        service.service_file_orig_b64 = str(base64.b64encode(
            pickle.dumps(yaml_file)), "utf-8")
        service.graph_b64 = str(base64.b64encode(
            pickle.dumps({'nodes': nodes, 'edges': edges})), "utf-8")

        # Second save persists the derived fields.
        service.save()

    return render(request, 'dashboard/services.html',
                  {'service_list': service_list})
Пример #13
0
        generated = launcher_api.edit_mshta_launcher(body=config)
    elif str(config.type) == "cscript":
        generated = launcher_api.edit_cscript_launcher(body=config)
    elif str(config.type) == "wscript":
        generated = launcher_api.edit_wscript_launcher(body=config)
    else:
        print("Don't know how to handle launcher of type: " + str(config.type))
        exit()

    return generated


# Covenant connection settings are read from config.yml when present.
config_file = 'config.yml'

if path.isfile(config_file):
    covenant = bios.read(config_file)
    covenant_admin_url = covenant['connection']['covenant_admin_url']
    covenant_user = covenant['connection']['covenant_user']
    covenant_pass = covenant['connection']['covenant_pass']

# NOTE(review): if config.yml is missing, the covenant_* names above are
# never bound and the lines below raise NameError — confirm the config
# file is guaranteed to exist, or add an explicit exit.
configuration = swagger_client.Configuration()
configuration.host = covenant_admin_url
configuration.username = covenant_user
configuration.password = covenant_pass
configuration.verify_ssl = False  # self-signed Covenant certs are common
configuration.get_basic_auth_token = True
configuration.debug = False

# Check our hosting files actually exist (If we have any)
if covenant['hosted_files'] is not None:
    for hosted_file in covenant['hosted_files']:
Пример #14
0
# Get the verifier code from the URL
redirect_response = input('Paste the full redirect URL here: ')
twitter.parse_authorization_response(redirect_response)

# Fetch the access token and store it as a dictionary
dict_file = twitter.fetch_access_token(access_token_url)



######################################################
#### Step 3: Storing the Keys in The YAML File #######
######################################################

# Load the current YAML file into Python as a dictionary
import bios
yaml_dict = bios.read('config.yaml')

# Pop out the twitter_api element, which is a dictionary within a dictionary
yaml_dict_new = yaml_dict.pop('twitter_api')

# Combine the current YAML dictionary with the dictionary with the access tokens from Step 2
d = dict_file.copy()
d.update(yaml_dict_new)

# Write the twitter_api key back into the new dictionary
d_final = {'twitter_api': d}

# Write the new dictionary into the YAML file in the directory
# NOTE(review): only the rebuilt 'twitter_api' section is written back —
# any other top-level keys that were in config.yaml are dropped here.
# Confirm that is intended.
with open(r'config.yaml', 'w') as file:
    documents = yaml.dump(d_final, file)
Пример #15
0
    def import_answers(self):
        """Rebuild the bot's ``responses`` section from the edited CSV
        (domain_answers_updated.csv) and write domain_updated.yml.

        CSV columns: 0 = response key, 1 = "custom_<index>", 2 = answer
        type, 3 = answer payload. Rows are regrouped into one custom block
        per (key, index) pair, and each payload is converted back to the
        structure expected for its type.
        """
        domain_file_path = "../../bot/domain.yml"
        domain_file_dict = bios.read(domain_file_path)

        #print(domain_file_dict["responses"])
        responses = {}

        with open('domain_answers_updated.csv') as csv_file:
            csv_reader = csv.reader(csv_file, delimiter=',')
            line_count = 0
            for row in csv_reader:
                # Index of the custom block this answer belongs to,
                # taken from "custom_<n>" in column 1.
                custom_level = int(row[1].split('_')[1])

                # Open a new custom block when the level changes for a key
                # we have already seen; otherwise create the key's first
                # block. NOTE(review): this assumes rows for a key appear
                # contiguously and in level order — confirm the CSV keeps
                # the export ordering.
                if row[0] in responses.keys():
                    if actual_level == custom_level:
                        pass
                    else:
                        responses[row[0]].append({"custom": {"answers": []}})
                        actual_level = custom_level
                else:
                    responses[row[0]] = [{"custom": {"answers": []}}]
                    actual_level = custom_level

                # Re-encode the payload by answer type: "html" stores its
                # payload under "text"; "command"/"hints" are JSON parsed
                # after swapping single quotes for double quotes;
                # "links" wraps the parsed list; "multichoice" additionally
                # escapes embedded double quotes before the swap.
                if row[2] == "html":
                    answer_import = "text"
                    responses[
                        row[0]][custom_level]["custom"]["answers"].append({
                            "type":
                            row[2],
                            "{}".format(answer_import):
                            row[3]
                        })
                elif row[2] == "command":
                    responses[
                        row[0]][custom_level]["custom"]["answers"].append(
                            json.loads(row[3].replace("\'", "\"")))
                elif row[2] == "hints":
                    responses[
                        row[0]][custom_level]["custom"]["answers"].append(
                            json.loads(row[3].replace("\'", "\"")))
                elif row[2] == "links":
                    responses[
                        row[0]][custom_level]["custom"]["answers"].append({
                            "type":
                            row[2],
                            "{}".format(row[2]):
                            json.loads(row[3].replace("\'", "\""))
                        })
                elif row[2] == "multichoice":
                    # Debug output left in place for the fragile quote
                    # re-escaping below.
                    print(line_count)
                    print(type(row[3]))
                    print(row[3])
                    print(row[3].replace("\'", "\""))
                    responses[
                        row[0]][custom_level]["custom"]["answers"].append(
                            json.loads(
                                (row[3].replace("\"",
                                                "\\\"")).replace("\'", "\"")))
                    #break
                else:
                    # Any other type: store the raw payload under a field
                    # named after the type.
                    responses[
                        row[0]][custom_level]["custom"]["answers"].append({
                            "type":
                            row[2],
                            "{}".format(row[2]):
                            row[3]
                        })

                line_count += 1

        # Carry the untouched sections over and swap in the rebuilt
        # responses, writing to a new file rather than overwriting.
        domain_updated = {
            "session_config": domain_file_dict["session_config"],
            "slots": domain_file_dict["slots"],
            "entities": domain_file_dict["entities"],
            "actions": domain_file_dict["actions"],
            "intents": domain_file_dict["intents"],
            "responses": responses
        }

        print(responses)
        print(line_count)
        bios.write('../../bot/domain_updated.yml', domain_updated)
Пример #16
0
import yaml
import os, sys
# Make the project root importable regardless of the working directory.
PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(PROJECT_ROOT)

# try:
#     with open(PROJECT_ROOT + os.sep + 'yamltest' + os.sep + 'collection.yml') as f:
#         configset = yaml.load(f)
#     collections_l = configset.keys()
#     print(collections_l)
#     oj_knkeys = configset["oj_kn"].keys()
#     print(oj_knkeys)
# except Exception as e:
#     print({'error': str(e)})


# Same experiment as the commented block above, but reading the YAML via
# bios instead of yaml.load.
import bios
configset = bios.read(PROJECT_ROOT + os.sep + 'yamltest' + os.sep + 'collection.yml', file_type='yaml')
print(configset)
collections_l = configset.keys()
print(collections_l)
# Inspect the keys of the "oj_kn" collection.
oj_knkeys = configset["oj_kn"].keys()
Пример #17
0
def read_cfg(cfg_path: str) -> dict:
    """Read the config file at *cfg_path* (format inferred by bios from the
    extension) and return its parsed contents."""
    return bios.read(cfg_path)
Пример #18
0
def main():
    """Import distros, profiles and hosts into cobbler from on-disk YAML.

    Scans subdirectories of ``distropath`` for distro.yaml definitions,
    loads ``<confpath>/profiles.yaml``, then every ``.yml``/``.yaml`` host
    file under ``hostspath``, registering each with cobbler before syncing.
    Exits with a code from ``return_codes`` when a required input is empty.
    """

    logging.basicConfig(format="%(asctime)s %(message)s", level=logging.DEBUG)

    ## look in opt/import or wherever confs are stored for a set of subdirectories to use for import.
    distrodirs = [f.path for f in os.scandir(distropath) if f.is_dir()]
    logging.debug(f"distro directories: {distrodirs}")

    cobble = cobblerInterface()

    ## if yaml file exists import.
    distro_count = 0  # sanity check: at least one distro must be found
    for dPath in distrodirs:
        dFile = os.path.join(dPath, "distro.yaml")

        if os.path.isfile(dFile):
            logging.debug(f"Adding Distro From: {dPath}")
            distro_doc = bios.read(dFile)

            logging.debug(f"Distro Data: {distro_doc}")
            cobble.add_distro(dPath=dPath, distro_data=distro_doc)
            distro_count += 1
        else:
            logging.debug(f"No distro file found in {dPath}")

    if distro_count == 0:
        exit(return_codes["no_distro_files_found"])

    profiles_file = os.path.join(confpath, "profiles.yaml")

    if os.path.isfile(profiles_file):
        logging.debug(f"profiles file {profiles_file} located. loading...")
        profiles = bios.read(profiles_file)
        logging.debug(f"Profiles: {profiles}")
        for label, prof in profiles.items():
            logging.debug(f"adding profile {label}")
            cobble.add_profile(prof)
    else:
        logging.error("no profiles found, can not proceed. exiting.")
        exit(return_codes["no_profiles_found"])

    # Host definitions: every YAML file under hostspath may contain several
    # hosts keyed by label. The flag is now a real boolean (was 0/1).
    hosts_found = False
    for hostFile in os.listdir(hostspath):
        if hostFile.endswith((".yml", ".yaml")):
            hosts_found = True
            logging.debug(f"hosts file {hostFile} located. loading...")
            hosts = bios.read(os.path.join(hostspath, hostFile))
            for label, host in hosts.items():
                logging.debug(f"add host: {host}")
                cobble.add_host(host)
    if not hosts_found:
        logging.error("no hosts found, can not proceed. exiting.")
        exit(return_codes["no_hosts_found"])

    cobble.sync()

    exit(0)
Пример #19
0
 def __init__(self, bt, color = 255, pos = None):
     """Load bitmap data from file *bt* and centre it horizontally.

     :param bt: path of the bitmap file (read with bios)
     :param color: display colour value (default 255)
     :param pos: [x, y] start position; defaults to a fresh [0, 0]
     """
     # BUG FIX: the default was a shared mutable list ([0, 0]) and this
     # method mutates self.pos (which aliases it) in place, so every
     # default-constructed instance shifted the same list further left.
     # A None sentinel gives each instance its own list.
     self.bt = bios.read(bt)
     self.pos = [0, 0] if pos is None else pos
     # Shift left by half the bitmap width (+1) to centre on pos[0].
     self.pos[0] -= len(self.bt[0])//2 + 1
     self.color = color
def lambda_handler(event, context):
    """AWS Lambda: check river levels against alert rules and email alerts.

    The rules YAML comes from RIVER_LEVEL_ALERTS_RULES_YAML (a local path
    or an s3:// URL), falling back to example_alert_rules.yaml. Each rule
    whose above/below level condition is met contributes a message; when
    any messages were produced they are emailed via SES. Returns a dict
    with statusCode 200 and the message body.
    """
    # copy river level alerts yaml from s3 using RIVER_LEVEL_ALERTS_
    rules_tmp_file = '/tmp/river-level-alerts-rules.yaml'

    if 'RIVER_LEVEL_ALERTS_RULES_YAML' in os.environ:
        print(
            f"Using rules from environment variable RIVER_LEVEL_ALERTS_RULES_YAML={os.environ['RIVER_LEVEL_ALERTS_RULES_YAML']}"
        )
        if os.environ['RIVER_LEVEL_ALERTS_RULES_YAML'].startswith('s3://'):
            # Split "s3://bucket/key..." into bucket and key and download
            # the rules file into the Lambda's writable /tmp.
            s3_path = os.environ['RIVER_LEVEL_ALERTS_RULES_YAML'][5:]
            s3_components = s3_path.split('/')
            bucket = s3_components[0]
            s3_key = '/'.join(s3_components[1:])
            s3 = boto3.resource('s3')
            s3.Bucket(bucket).download_file(s3_key, rules_tmp_file)
        else:
            shutil.copyfile(os.environ['RIVER_LEVEL_ALERTS_RULES_YAML'],
                            rules_tmp_file)
    else:
        print(
            "Using default rules from example_alert_rules.yaml, override with RIVER_LEVEL_ALERTS_RULES_YAML which can read from s3 using s3://some_bucket/your_rules.yaml"
        )
        shutil.copyfile('example_alert_rules.yaml', rules_tmp_file)

    alerts = bios.read(rules_tmp_file)
    print(alerts)

    df = get_river_levels_df()
    print(df)

    messages = ""

    for alert in alerts:
        # Current reading for this alert's location.
        level_for_location = df[df['Location'] ==
                                alert['Location']]['Level'].values[0]
        if alert['Direction'] == 'above':
            if level_for_location > alert['Level']:
                messages += alert_messsage(alert, level_for_location)
        elif alert['Direction'] == 'below':
            if level_for_location < alert['Level']:
                messages += alert_messsage(alert, level_for_location)
        else:
            # Misconfigured rule: fail the whole invocation loudly.
            print(
                f"alert for {alert['Location']} has invalid direction {alert['Direction']}"
            )
            os.sys.exit(1)

    # NOTE(review): the source link is appended unconditionally, so even
    # with zero triggered alerts `messages` is non-empty here — but the
    # email guard below compares against "", meaning an email is sent on
    # every run. Confirm whether the link should only be added when at
    # least one alert fired.
    messages += 'https://www.waikatoregion.govt.nz/services/regional-services/river-levels-and-rainfall/river-levels-and-flow-latest-reading/'

    print(messages)

    if messages != "":
        ses = boto3.client('ses')
        response = ses.send_email(
            Source=os.environ['RIVER_LEVEL_ALERTS_EMAIL_ADDRESS'],
            Destination={
                'ToAddresses': [
                    os.environ['RIVER_LEVEL_ALERTS_EMAIL_ADDRESS'],
                ]
            },
            Message={
                'Subject': {
                    'Data': 'River level alert'
                },
                'Body': {
                    'Text': {
                        'Data': messages
                    }
                }
            })

    response = {"statusCode": 200, "body": messages}

    return response
Пример #21
0
import math
import re
from collections import Counter
import pkg_resources
from urllib.request import urlopen
import os

# Remote locations of the bringer content files and the content index.
URL_BASE_OF_CONTENTS = "https://raw.githubusercontent.com/bilgehannal/bringer-data/main/contents"
URL_CONTENT_FILE = "https://raw.githubusercontent.com/bilgehannal/bringer-data/main/content.yaml"

# Word tokeniser used by the cosine-similarity helpers below.
WORD = re.compile(r"\w+")

# Download the content index, cache it locally, then parse it with bios.
# NOTE(review): file_type='standart' is passed verbatim to bios — confirm
# this (sic) spelling matches the literal the library expects.
f = urlopen(URL_CONTENT_FILE)
tmp_str = f.read().decode("utf-8")
bios.write('bringer_content.yaml', tmp_str, file_type='standart')
contents = bios.read('bringer_content.yaml')

def text_to_vector(text):
    """Tokenise *text* with the module-level WORD regex and return a
    Counter mapping each word to its frequency."""
    return Counter(WORD.findall(text))

def get_cosine(text1, text2):
    vec1 = text_to_vector(text1.lower())
    vec2 = text_to_vector(text2.lower())

    intersection = set(vec1.keys()) & set(vec2.keys())
    numerator = sum([vec1[x] * vec2[x] for x in intersection])

    sum1 = sum([vec1[x] ** 2 for x in list(vec1.keys())])
    sum2 = sum([vec2[x] ** 2 for x in list(vec2.keys())])
    denominator = math.sqrt(sum1) * math.sqrt(sum2)
Пример #22
0
    filepath = f"{args.tmp_folder}/csv/frame{frame_num}.csv"
    with open(filepath, 'w', newline='') as f:
        writer = csv.writer(f)
        for d in detections:
            writer.writerow(d)


if __name__ == "__main__":
    args = load_args()
    if args.detector == "ssd":
        detector = SSDDetector()
    elif args.detector == "yolo":
        detector = YOLODetector()
    elif args.detector == "fchd":
        detector = FCHDDetector()
    detector_cfg = bios.read(args.cfg)[args.detector]

    video = imageio.get_reader(args.video, "ffmpeg")
    for x in range(0, int(args.frame_cnt)):
        logger.info(f"Performing detection on frame {x+1}/{args.frame_cnt}")
        image = video.get_data(x)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        frame_num_str = ''.join('{:05d}'.format(x))
        image_path = f"{args.tmp_folder}/img/frame{frame_num_str}.jpeg"
        cv2.imwrite(image_path, image)

        detections = detector.find_heads(img_path=image_path, cfg=detector_cfg)
        detections_to_csv(detections=detections, frame_num=x)

        # since there where problems with operating on last frames of videos,
        # I assume that the last frame was just the same as previous one
Пример #23
0
import bios
from os import environ
from os import path

# Load env.yaml (when present) into this process's environment variables.
if path.exists("env.yaml"):
    variables = bios.read('env.yaml')
    for key, value in variables.items():
        # os.environ values must be strings; YAML scalars can parse as
        # int/bool/float, which would raise TypeError without str().
        environ[key] = str(value)
Пример #24
0
import bios
import os
import time

from run_fchd import FCHDDetector
from run_yolo import YOLODetector
from run_ssd import SSDDetector

# Collect every file in the test image directory as a relative path.
TEST_IMG_DIR = "test_imgs/"
IMAGE_PATHS = os.listdir(TEST_IMG_DIR)
IMAGE_PATHS = [TEST_IMG_DIR + path for path in IMAGE_PATHS]
# Per-detector configuration loaded from YAML (keyed "fchd", "yolo", ...).
cfg = bios.read("config.yaml")

# One instance of each head detector to benchmark.
fchd = FCHDDetector()
yolo = YOLODetector()
ssd = SSDDetector()

if __name__ == "__main__":
    for img in IMAGE_PATHS[6:]:
        print(img)

        start = time.time()
        res = fchd.find_heads(img_path=img, cfg=cfg["fchd"])
        end = time.time()
        print("fchd time", end - start)

        start = time.time()
        res = yolo.find_heads(img_path=img, cfg=cfg["yolo"])
        end = time.time()
        print("yolo time", end - start)