Example No. 1
def handleInput():
    downloadQueue = []

    # data.txt is the file where the front end leaves the data for us
    inputText = core.read('/opt/lampp/htdocs/data.txt')
    print(inputText)
    core.write('/opt/lampp/htdocs/data.txt', '')  # clear the file so the same input is not processed twice
    # inputText is split into an array so that we can work with each line
    # individually (the front end puts different URLs on different lines).
    inputArray = core.split(inputText)
    print(inputArray)

    # Go through each line and classify it as a playlist URL, a video URL,
    # or unrelated text, then carry out the appropriate response.
    for inputData in inputArray:
        print(inputData)

        if 'list=' in inputData:  # 'list=' appears only in playlist URLs
            core.log('playlists', inputData)
        if 'v=' in inputData:  # 'v=' appears only in video URLs
            downloadQueue.append(inputData)

    return downloadQueue
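The core module itself is not shown on this page. A minimal sketch of the helpers used above, assuming they are thin wrappers over whole-file I/O and newline splitting (the names match the calls above, but the bodies are inferred, not taken from the real module):

def read(path):
    # Return the whole file as one string; empty string if it does not exist.
    try:
        with open(path) as f:
            return f.read()
    except FileNotFoundError:
        return ''

def write(path, text):
    # Overwrite the file with the given text (an empty string clears it).
    with open(path, 'w') as f:
        f.write(text)

def split(text):
    # Break multi-line text into a list of non-empty lines.
    return [line for line in text.splitlines() if line.strip()]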
Example No. 2
def playlistCheck():
    playlists = core.split(core.read('playlists'))

    # Process each stored playlist URL, skipping blank or junk lines.
    # (The original checked playlists[0] on every iteration, which only
    # ever tested the first entry; each entry is checked here.)
    for playlistUrl in playlists:
        if len(playlistUrl) > 2:
            # Create an empty watched-log file named after the playlist ID.
            core.write(playlistUrl.split('list=')[1], '')
            playlist(playlistUrl)
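handleInput() records playlist URLs with core.log('playlists', ...), and playlistCheck() reads them back from a file named 'playlists'. For the two to work together, log presumably appends one entry per line; a sketch under that assumption (the body is inferred, not taken from the real module):

def log(path, line):
    # Append a single entry per line so split() can recover them later.
    with open(path, 'a') as f:
        f.write(line + '\n')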
Example No. 3
def watchedCheck(playlistID, videoIDs):
    unwatchedVideoIDs = []
    watchedList = core.read(playlistID)
    watchedIDs = core.split(watchedList)
    # Keep only the IDs that are absent from the watched log.
    for videoID in videoIDs:
        if videoID not in watchedIDs:
            unwatchedVideoIDs.append(videoID)

    return unwatchedVideoIDs
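A hypothetical call with placeholder IDs (the real ID format is not shown here): if the watched log for the playlist contains 'abc', only the remaining IDs come back.

unwatched = watchedCheck('PL_example', ['abc', 'def', 'ghi'])
# ['def', 'ghi'] if the file named 'PL_example' contains the line 'abc'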
Example No. 4
import numpy as np
import peakutils

def peak_detection_fxn(data_y):
    """The function takes an input of the column containing the y variables in the dataframe,
    associated with the current. The function calls the split function, which splits the
    column into two arrays, one of the positive and one of the negative values.
    This is because cyclic voltammetry delivers negative peaks, but the peakutils function works
    better with positive peaks. The function also runs on the middle 80% of data to eliminate
    unnecessary noise and messy values associated with pseudo-peaks.The vectors are then imported
    into the peakutils.indexes function to determine the significant peak for each array.
    The values are stored in a list, with the first index corresponding to the top peak and the
    second corresponding to the bottom peak.
    Parameters
    ______________
    y column: must be a column from a pandas dataframe
    Returns
    _____________
    A list with the index of the peaks from the top curve and bottom curve.
    """

    # initialize storage list
    index_list = []

    # split data into above and below the baseline
    col_y1, col_y2 = core.split(data_y)

    # determine the length of the data and what 10% of it is
    len_y = len(col_y1)
    ten_percent = int(np.around(0.1 * len_y))

    # trim both input columns to the middle 80% of the data
    # (take off the first and last 10%)
    # this avoids detecting peaks from electrolysis
    # (from water splitting rather than the molecule itself,
    # which can form random "peaks")
    mod_col_y2 = col_y2[ten_percent:len_y - ten_percent]
    mod_col_y1 = col_y1[ten_percent:len_y - ten_percent]

    # run peakutils package to detect the peaks for both top and bottom
    peak_top = peakutils.indexes(mod_col_y2, thres=0.99, min_dist=20)
    peak_bottom = peakutils.indexes(abs(mod_col_y1), thres=0.99, min_dist=20)

    # determine the number of peaks found in each half of the data
    len_top = len(peak_top)
    len_bot = len(peak_bottom)

    # take the middle peak from each set and append it to the storage list,
    # adding ten_percent back so the indices refer to the original,
    # untrimmed data rather than the modified arrays
    index_list.append(peak_top[int(len_top / 2)] + ten_percent)
    index_list.append(peak_bottom[int(len_bot / 2)] + ten_percent)

    # return storage list
    # first value is the top, second value is the bottom
    return index_list
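The docstring above says core.split separates the current column into its positive and negative values. A sketch of that step under exactly that reading (the real split may instead cut the sweep at its turning point; the name and body here are assumptions):

import numpy as np

def split_by_sign(current):
    # Hypothetical stand-in for core.split as the docstring describes it:
    # negative values form the bottom (reduction) curve,
    # non-negative values form the top (oxidation) curve.
    current = np.asarray(current)
    return current[current < 0], current[current >= 0]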
Example No. 5
def data_analysis(df):
    results_dict = {}

    x = df['Potential']
    y = df['Current']
    # Indices of the top and bottom peaks (a two-element list)
    peak_index = core.peak_detection_fxn(y)
    # Split x,y to get baselines
    x1, x2 = core.split(x)
    y1, y2 = core.split(y)
    y_base1 = core.linear_background(x1, y1)
    y_base2 = core.linear_background(x2, y2)
    # Calculations based on baseline and peak
    values = core.peak_values(x, y)
    Et = values[0]
    Eb = values[2]
    dE = core.del_potential(x, y)
    half_E = min(Et, Eb) + core.half_wave_potential(x, y)
    heights = core.peak_heights(x, y)
    ia = heights[0]  # anodic peak current
    ic = heights[1]  # cathodic peak current
    ratio_i = core.peak_ratio(x, y)
    results_dict['Peak Current Ratio'] = ratio_i
    results_dict['Ipc (A)'] = ic
    results_dict['Ipa (A)'] = ia
    results_dict['Epc (V)'] = Eb
    results_dict['Epa (V)'] = Et
    results_dict['∆E (V)'] = dE
    results_dict['Redox Potential (V)'] = half_E
    # A peak separation above 0.3 V is treated as irreversible.
    if dE > 0.3:
        results_dict['Reversible'] = 'No'
    else:
        results_dict['Reversible'] = 'Yes'

    # Classify reversible couples by the sign of the redox potential.
    if half_E > 0 and 'Yes' in results_dict.values():
        results_dict['Type'] = 'Catholyte'
    elif 'Yes' in results_dict.values():
        results_dict['Type'] = 'Anolyte'
    return results_dict, x1, x2, y1, y2, y_base1, y_base2, peak_index
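The reversibility and catholyte/anolyte decision at the end is self-contained enough to isolate. A sketch that mirrors it, using the same 0.3 V peak-separation cutoff hard-coded above (the cutoff comes from this code, not from an external criterion):

def classify(dE, half_E):
    # Reversible if the peak separation is at most 0.3 V; reversible
    # couples with a positive redox potential are catholytes, the rest
    # anolytes, matching the dictionary logic in data_analysis.
    if dE > 0.3:
        return 'No', None
    return 'Yes', 'Catholyte' if half_E > 0 else 'Anolyte'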
Example No. 6
def main():
	clear()

	# Read, log, and split the raw input data.
	create()
	data = core.read('info')
	core.log('log', data)
	data = core.split(data)

	# Read, log, and split the stored playlist URLs.
	createPlaylist()
	playlistUrls = core.read('playlistUrls')
	core.log('log', playlistUrls)
	playlistUrls = core.split(playlistUrls)

	search(data)

	# Read, log, and split the videos found by search().
	videos = core.read('videos')
	core.log('log', videos)
	videos = core.split(videos)

	download(data, videos)

	clear()
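If the module is meant to be executed directly, a standard entry-point guard would look like this (an assumption; the original file's entry point is not shown):

if __name__ == '__main__':
	main()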
Example No. 7
import html

def split_words(text):
    # Take the first candidate segmentation produced by split();
    # an IndexError means no segmentation was found.
    try:
        answer = split(text)[0]
    except IndexError:
        return dict(input=text, data=[], translations={})

    # Translate the original word together with its parts in one batch.
    query = [text] + answer

    translator = GoogleTranslator()
    translations = translator.translate(query=query, source="is", target="en")

    # Pair each source string with its unescaped English translation.
    response = [(source, html.unescape(translated['translatedText']))
                for (source, translated) in zip(query, translations)]

    return dict(input=text, data=answer, translations=dict(response))
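Assuming the surrounding module provides split and GoogleTranslator, a hypothetical call might look like the following (the word and its parts are illustrative; the real splitter's output is not shown):

result = split_words('sjónvarp')
# result['data'] would hold the parts, e.g. ['sjón', 'varp'], and
# result['translations'] would map each Icelandic string to its English gloss.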