Example no. 1
def load_lasagne_nn(json_name, weights_name):
    info = read_json(json_name)
    nn = LasagneNN(architecture=info['arch'],
                   dim=info['dim'],
                   params=info['params'])
    nn.load_weights(pickle.load(open(weights_name, 'rb'))['weights'])
    return info, nn
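Example no. 1 only shows the call site for read_json; the helper itself is not part of the snippet. A minimal stand-in consistent with that usage (a sketch, not the project's actual implementation) could look like this:

import json

def read_json(json_name):
    # Hypothetical stand-in for the project's helper: parse a JSON file into a dict.
    with open(json_name, 'r') as f:
        return json.load(f)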
Example no. 2
def register():
    teacher_name = input("Please enter your account username (3-6 characters):")
    teacher_pwd = input("Please enter your account password (6-12 characters):")
    teacher_dict = file_utils.read_json("teacher_data.json", {})
    t = modul.Teacher(teacher_name, teacher_pwd)
    if teacher_name not in teacher_dict:
        teacher_dict[t.name] = t.pwd
        file_utils.write_json(teacher_dict, "teacher_data.json")
    else:
        print("已存在的用户,请重新输入:")
Example no. 3
def login():
    if file_utils.read_file("teacher_data.json"):
        teacher_name = input("Please enter your account username:")
        teacher_pwd = input("Please enter your account password:")
        teacher_dict = file_utils.read_json("teacher_data.json", {})
        if teacher_dict.get(teacher_name) == teacher_pwd:
            student_utils.manager_stu()
        else:
            print("账户名字或者密码不正确!")
    else:
        print("当前teacher账户为空,请先注册")
Example no. 4
def manager_stu():
    while True:
        print(file_utils.read_file("student_ui.txt"))
        num = input("Please choose (1-5):")
        student_list = file_utils.read_json("student_data.json", [])
        if num == "1":
            add_stu(student_list, "student_data.json")
        elif num == "2":
            search_stu(student_list)
        elif num == "3":
            mod_stu(student_list)
        elif num == "4":
            del_stu(student_list)
        elif num == "5":
            break
        else:
            print("输入错误")
Example no. 5
            if i in summary_sentence_indexes:
                markdown += '  ***' + raw_sentences[i] + '***'
            else:
                markdown += raw_sentences[i]

        elif mode is TRIM:
            if i in summary_sentence_indexes:
                markdown += '  ' + raw_sentences[i]

    return markdown

def transform_slider_input(x):
    return str(int(x/10))

# Read processed summary file
manifest = file_utils.read_json('summary_output.json')

# Get specific pieces of data
audio_url = manifest['audio_url']
sentences = manifest['sentences']
summary_indices = manifest['summary_indices']


# ========== UI ==================================================

streamlit.title('Audio Summarizer')

# Audio Player
streamlit.markdown('## Original Audio')
streamlit.audio(audio_url, format='audio/mp3', start_time=0)
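Example no. 5 loads summary_output.json with no default value, so the manifest must already exist and contain at least the three keys read above (it also assumes streamlit and the project-local file_utils are importable). A hypothetical script that produces a compatible manifest (the field names come from the snippet; the values are made up) would be:

import json

# Made-up manifest matching the three keys the UI reads: audio_url, sentences, summary_indices.
manifest = {
    "audio_url": "https://example.com/episode.mp3",
    "sentences": ["First sentence.", "Second sentence.", "Third sentence."],
    "summary_indices": [1],
}

with open('summary_output.json', 'w') as f:
    json.dump(manifest, f)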
Example no. 6
def match_sites_dataframe(dataframe, matches_json="", top_n=5):
    '''
    Generates a dataframe of matched sites.
    matches_json is an optional parameter for saving and loading slow-to-generate,
    description-based matches.
    INPUTS:
     - dataframe
     - matches_json -- Filename of a JSON file containing previously computed matches, used to speed up processing
     - top_n (int) -- Maximum number of matches to return for each item
    OUTPUTS:
     - matches_df
    '''

    #Missing values should be represented by empty strings
    dataframe = dataframe.fillna(value="")

    #Ensure we have the correct columns
    dataframe = pandas.DataFrame(dataframe.to_dict("records"),
                                 columns=ALL_FIELDNAMES)

    #Fill any columns we just added with "-1" to mark that the data wasn't originally there
    dataframe = dataframe.fillna(value="-1")

    #Make sure everything in that dataframe is a string
    dataframe = dataframe.applymap(lambda x: str(x))

    #Remove extra whitespace
    dataframe = dataframe.applymap(lambda x: x.strip()
                                   if type(x) == str else x)

    if "Match Site" in dataframe.columns:
        ndf = dataframe[dataframe["Match Site"] == "-1"]
        if ndf.empty:
            #No new rows.
            return pandas.DataFrame()
        odf = dataframe[dataframe["Match Site"] != "-1"]
        if odf.empty:
            old_rows = []
        else:
            old_rows = odf.to_dict("records")
        new_rows = ndf.to_dict("records")
    else:
        new_rows = dataframe.to_dict("records")
        old_rows = []
    # Add a 'Description' field to new_rows
    site_rows = [{
        **row, "Description": row["Stock Description"]
    } for row in new_rows]
    old_site_rows = remove_duplicate_rows(old_rows)
    old_item_ids_to_rows = generate_item_ids_to_rows(old_rows)

    # Generate desc_matches based on matches_json
    desc_matches = {}
    if matches_json:
        if file_utils.file_exists(matches_json):
            desc_matches = file_utils.read_json(matches_json)
        else:
            desc_matches = match_by_description(site_rows, old_site_rows)
            file_utils.save_json(matches_json, desc_matches)

    matches_rows = match_sites(site_rows,
                               old_site_rows,
                               old_item_ids_to_rows,
                               desc_matches,
                               top_n=top_n)
    matches_df = pandas.DataFrame(matches_rows, columns=OUTPUT_FIELDNAMES)
    matches_df = matches_df.fillna(value="")
    matches_df = matches_df[OUTPUT_FIELDNAMES]
    return matches_df
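Because match_sites_dataframe only recomputes description matches when matches_json is missing, passing a cache path makes repeated runs over the same data much faster. A hypothetical call (the input columns here are placeholders; the real schema comes from ALL_FIELDNAMES):

import pandas

# Placeholder input row; the real column set comes from ALL_FIELDNAMES.
df = pandas.DataFrame([{"Item ID": "A-100", "Stock Description": "10mm hex bolt"}])

# The first run computes description matches and saves them to desc_matches.json;
# later runs with the same path reload them instead of calling match_by_description().
matches_df = match_sites_dataframe(df, matches_json="desc_matches.json", top_n=3)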