Example #1
def read_json():
    return_dict = read_excel('test_down')
    time = return_dict['time']
    # load cached model predictions and ground-truth series saved as JSON
    with open('y_pred_speed.json', 'r') as f:
        pred_speed = json.load(f)

    with open('y_pred_flow.json', 'r') as f:
        pred_flow = json.load(f)

    with open('y_true_speed.json', 'r') as f:
        true_speed = json.load(f)

    with open('y_true_flow.json', 'r') as f:
        true_flow = json.load(f)

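    # position just after 2019-04-08 08:15:00 on the time axis; the series is
    # returned from this index onwards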
    cut = np.where(
        time == np.datetime64('2019-04-08T08:15:00'))[0].tolist()[0] + 1
    # speed = return_dict['speed']
    # flow = return_dict['flow']

    pred_speed = np.array(pred_speed).reshape(-1, 1)
    pred_flow = np.array(pred_flow).reshape(-1, 1)
    true_speed = np.array(true_speed).reshape(-1, 1)
    true_flow = np.array(true_flow).reshape(-1, 1)

    return time[cut:], pred_speed, pred_flow, true_speed, true_flow
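
A typical follow-up (not part of the original example) would be to score the loaded predictions against the ground truth, e.g. with a simple RMSE helper:

import numpy as np

def rmse(pred, true):
    # root-mean-square error between two equally shaped arrays
    return float(np.sqrt(np.mean((np.asarray(pred) - np.asarray(true)) ** 2)))

time, pred_speed, pred_flow, true_speed, true_flow = read_json()
print('speed RMSE:', rmse(pred_speed, true_speed))
print('flow RMSE:', rmse(pred_flow, true_flow))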
Example #2
def data_uploader():
    # Uploading data
    uploaded_file = st.file_uploader(
        "Upload the Sensor Data file (only .XLSX format)", type="xlsx")
    if uploaded_file is not None:
        return ut.read_excel(uploaded_file)
    else:
        return None
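
If this helper is part of a Streamlit page, it might be wired in roughly as follows (the preview layout below is illustrative, not from the original source):

import streamlit as st

df = data_uploader()
if df is not None:
    st.dataframe(df)   # preview the uploaded sensor data
else:
    st.info("Upload an .xlsx file to continue.")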
Example #3
def get_final_weight():
    return_dict = read_excel('train_up')
    time = return_dict['time']
    speed = return_dict['speed']
    flow = return_dict['flow']
    speed_scaler = return_dict['scaler_speed']
    flow_scaler = return_dict['scaler_flow']

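    # undo the scaling so the entropy weights are computed on the original units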
    speed = speed_scaler.inverse_transform(speed)
    flow = flow_scaler.inverse_transform(flow)

    _, mw, ew, ow = cal_entropy_weight(time, speed, flow)

    return mw, ew, ow
Example #4
def create_vids_from_excel(inpfile, sheet, tmpdir, story, first_slide, last_slide, voice, user):
	mdir = os.getcwd()
	files = ['pup.js', 'blank.mp3', 'blank_long.mp3', 'logo.png']
	for f in files:
		shutil.copy(f, "{0}/{1}".format(tmpdir, f))
	os.chdir(tmpdir)
	os.mkdir('images')
	sheet = read_excel(inpfile, sheet)
	create_intro_video(sheet, voice)
	normal_videos = []
	slow_videos = []
	split_videos = []
	start = int(first_slide)
	end = int(last_slide)+1
	for i in range(start, end):
		if not sheet.cell(row=i, column=1).value:
			continue
		para = sheet.cell(row=i, column=2).value.strip()
		(json_data, json_data_slow, json_data_split) = create_audio_data(para, voice)

		images_text = ' '.join(para.strip().replace('*', '').replace('\n', ' \n ').split(' '))
#		print("images text: " + images_text)
		images = create_images(images_text, sheet.cell(row=i, column=3).value, story)

		json_data = process_json_data(json_data, voice)
		json_data_slow = process_json_data(json_data_slow, voice)
		json_data_split = process_json_data(json_data_split, voice)

		create_para_vid(1, i-start-1, json_data, images, 'audio.mp3', "vid{}".format(str(i-start+1)))
		create_para_vid(0.75, i-start-1, json_data_slow, images, 'audio_slow.mp3', "vid{}-slow".format(str(i-start+1)))
		create_para_vid(1, i-start-1, json_data_split, images, 'audio_split.mp3', "vid{}-split".format(str(i-start+1)))

		normal_videos.append("vid{}.mp4".format(str(i-start+1)))
		slow_videos.append("vid{}-slow.mp4".format(str(i-start+1)))
		split_videos.append("vid{}-split.mp4".format(str(i-start+1)))

	os.chdir(os.path.join(mdir, 'static', 'videos', user))
	if story=='1':
		os.chdir("Stories")
	else:
		os.chdir("Other Videos")
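	# output_name is not defined in this snippet; it is presumably set at module level in the original source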
	os.mkdir(output_name)
	print(output_name)
	os.chdir(os.path.join(os.getcwd(), output_name))
	print(os.getcwd())

	concatenate_videos(["intro.mp4"]+normal_videos, "{}-fast.mp4".format(output_name), tmpdir)
	concatenate_videos(["intro.mp4"]+slow_videos, "{}-medium.mp4".format(output_name), tmpdir)
	concatenate_videos(["intro.mp4"]+split_videos, "{}-slow.mp4".format(output_name), tmpdir)
Example #5
def rebalance(spread_sheet_name, idx_name):
    portfolio_fname = 'portfolio-analysis-' + idx_name + '.p'
    bad_fname = 'not-portfolio-analysis-' + idx_name + '.p'

    port_df = u.read_pickle(portfolio_fname)
    bad = u.read_pickle(bad_fname)
    spread_sheet = u.read_excel(spread_sheet_name)

    # get current allocation
    current_alloc = s.get_portfoilo_alloc(spread_sheet)
    tickers = spread_sheet.ticker.to_list()

    bad_list = []
    new_port_df = pd.DataFrame()
    # kick out current port members that did not make the cut
    for ticker in tickers:
        if len(bad[bad.ticker == ticker]) == 0:  # not found
            tmp_df = port_df[port_df.ticker == ticker]
            if len(tmp_df) == 0:
                log.info('ticker not found {}'.format(ticker))
                bad_list.append({'ticker': ticker, 'notes': 'Not found'})
                continue
            new_port_df = new_port_df.append(tmp_df)
        else:
            bad_list.append({'ticker': ticker, 'notes': 'SELL'})
            log.info('ticker {} did not make the cut. dropping from portfolio'.
                     format(ticker))

    # replace kicked out entries
    if len(bad_list):
        not_in_port = port_df[~port_df.ticker.isin(new_port_df.ticker)].dropna()
        new_members = not_in_port[:len(bad_list)].copy()
        new_members['notes'] = 'NEW BUY'
        new_port_df = new_port_df.append(new_members)

    r.do_allocation(new_port_df, len(new_port_df))

    new_port_df = new_port_df.append(bad_list, ignore_index=True)

    sheet_name = 'portfolio-' + str(date.today())

    # add sheets to spreadsheet
    u.append_worksheet_to_excel(spread_sheet_name, sheet_name, new_port_df)
    u.append_worksheet_to_excel(spread_sheet_name, 'current-alloc',
                                current_alloc)
Example #6
def create_vids(inpfile, sheet_name, tmpdir, story, first_slide, last_slide,
                audio_in, user):
    mdir = os.getcwd()
    files = ['pup.js', 'blank.mp3', 'blank_long.mp3', 'logo.png']
    for f in files:
        shutil.copy(f, "{0}/{1}".format(tmpdir, f))
    os.chdir(tmpdir)
    os.mkdir('images')
    audio_files = split_audio_on_silence(audio_in)
    print(audio_files)
    sheet = read_excel(inpfile, sheet_name)
    create_intro_video(sheet, audio_files[0])
    page_videos = []
    start = int(first_slide)
    end = int(last_slide) + 1
    for i in range(start, end):
        if not sheet.cell(row=i, column=1).value:
            continue

        page_no = i - start + 1

        para = sheet.cell(row=i, column=2).value.strip()
        image_text = ' '.join(para.strip().replace('*', '').replace(
            '\n', ' \n ').split(' '))
        image = create_image(image_text,
                             sheet.cell(row=i, column=3).value, page_no, story)

        audio_file = audio_files[page_no]
        create_page_vid(page_no, audio_file, image)

        page_videos.append("{}.mp4".format(page_no))

    os.chdir(os.path.join(mdir, 'static', 'videos', user))

    if story == '1':
        os.chdir("Stories")
    else:
        os.chdir("Other Videos")

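    # output_name is not defined in this snippet; it is presumably set at module level in the original source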
    os.mkdir(output_name)

    os.chdir(os.path.join(os.getcwd(), output_name))

    concatenate_videos(["intro.mp4"] + page_videos,
                       "{}.mp4".format(output_name), tmpdir)
Example #7
def get_S(fname):
    df = read_excel(fname, header=[0])
    df.head()  # quick look at the parsed sheet; only useful interactively

    freq = np.asarray(df.index)

    dx = 0.01
    # gT = graphTool((2,1), (10,10))

    S11_DB = df['log_s11']
    S11_phase = df['phase_s11']
    S11 = get_S_parameter(S11_DB, S11_phase)
    S11 = S11.reshape(1, 801)

    S21_DB = df['log_s21']
    S21_phase = df['phase_s21']
    S21 = get_S_parameter(S21_DB, S21_phase)
    S21 = S21.reshape(1, 801)
    return freq, S11, S21
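
get_S_parameter is not included in this example. Below is a minimal sketch of the conversion it presumably performs, turning log magnitude [dB] and phase into a complex S-parameter; the function body is an assumption, including the choice of degrees for the phase column:

import numpy as np

def get_S_parameter(mag_db, phase_deg):
    # assumed conversion: |S| = 10**(dB/20), phase supplied in degrees
    mag = 10.0 ** (np.asarray(mag_db) / 20.0)
    phase = np.deg2rad(np.asarray(phase_deg))
    return mag * np.exp(1j * phase)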
Example #8
# the start of this call was truncated in the source; the '-c' flag is inferred
# from the later use of args.c
parser.add_argument('-c',
    help = "Columnfile in which the value of each column is specified (needed for " +
        ".xls(x) and .txt files, not for .json files")
parser.add_argument('--header', action = 'store_true', 
    help = "Specify if the file contains a header")
args = parser.parse_args() 

# read in file
if args.i[-3:] == "xls" or args.i[-4:] == "xlsx":
    # make sure date and time fields are correctly processed
    indexline = utils.read_columnfile(args.c)
    date, time = False, False
    if indexline[3] != "-":
        date = indexline[3]
    if indexline[4] != "-":
        time = indexline[4]  
    lines = utils.read_excel(args.i, args.header, date, time)
elif args.i[-4:] == "json":
    csvrows = utils.read_json(args.i)
else: # txt file
    with open(args.i, encoding="utf-8") as fn:
        lines = [x.strip().split("\t") for x in fn.readlines()]
        if args.header:
            lines = lines[1:]

# with open(args.o, 'w') as csvfile:
#     writer = csv.writer(csvfile)
#     for line in lines:
#         writer.writerow(line)
# quit()

# set columns of lines in right order
Example #9
import pandas as pd
import numpy as np
from utils import read_excel, graphTool

# raw string so the backslash in the Windows path is not treated as an escape
df = read_excel(r'NRW~1\MeasData(19-12-23 17-10-06)_CNT.xls')
# df.columns can be inspected interactively; the sheet is parsed with a
# two-level column index such as ('LOG MAG [dB]', 'S11')
gT = graphTool(figshape=(1, 1), figsize=(5, 5))

freq = np.asarray(df.index)
S11_logMagnitude = df[('LOG MAG [dB]', 'S11')].to_numpy()
S21_logMagnitude = df[('LOG MAG [dB]', 'S21')].to_numpy()
# linear magnitude, if needed: 10 ** (S11_logMagnitude / 20)

gT.plot(ax_num=(0, 0), x=freq, y=S11_logMagnitude, c='r', label='S11')
gT.plot(ax_num=(0, 0), x=freq, y=S21_logMagnitude, c='b', label='S21')