Example #1
def run_script_func(script_hex_id):
    application = Application.query.filter_by(hex_id=script_hex_id).first()
    scriptn = script_hex_id + '.py'
    script_path = os.path.join(root_folder, scriptn)

    if operating_system() == 'Windows':
        py_cmd = 'py '
    else:
        py_cmd = 'python3 '

    if application.run_in_bkrd is False:
        out = subprocess.run(py_cmd + script_path,
                             stdout=subprocess.PIPE,
                             shell=True,
                             encoding='utf-8')

        if out.returncode == 0:
            out = str(out.stdout)

        else:
            out = 'error'

        return out

    else:
        get_stats(force=True)
        t = Process(target=start_bkrd_script,
                    args=(py_cmd, script_path, application))
        t.start()
        out = 'Process Started!'
        sleep(3)
        return out
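A note on the pattern above: string concatenation with shell=True works, but passing an argument list avoids shell quoting issues, and sys.executable sidesteps the per-OS 'py'/'python3' switch. A minimal sketch under those assumptions, not the project's code:

import subprocess
import sys

# sketch: run the script with the same interpreter that is running this code
out = subprocess.run([sys.executable, script_path],
                     stdout=subprocess.PIPE,
                     encoding='utf-8')
result = out.stdout if out.returncode == 0 else 'error'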
Example #2
def remoteInputAccess(function, file_path, error_msg):
    """
    Wrapper around the provided file access function.
    It allows re-trying the open/read operation if network is temporarily down.
    Arguments:
        function: function to be called to read/open the file.
        file_path: path of the file to be read/open.
    """
    num_trials = 1
    bad_sep = "/" if operating_system() == "Windows" else "\\"
    file_path = file_path.replace(bad_sep, os.path.sep)
    root = os.path.sep.join(file_path.split(os.path.sep)[:4])

    while num_trials <= MAX_READ_TRIALS:
        try:
            obj = function(str(file_path))
            if num_trials > 1:
                print(
                    "File {0} succesfully accessed after {1} attempts".format(
                        file_path, num_trials))
            num_trials = MAX_READ_TRIALS + 1
        except IOError:
            if os.path.exists(root) and not os.path.exists(file_path):
                raise LisfloodFileError(file_path, error_msg)
            elif num_trials == MAX_READ_TRIALS:
                raise Exception(
                    "Cannot access file {0}!\nNetwork down for too long OR bad root directory {1}!"
                    .format(file_path, root))
            else:
                num_trials += 1
                print("Trying to access file {0}: attempt n. {1}".format(
                    file_path, num_trials))
                xtime.sleep(READ_PAUSE)
    return obj
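A hypothetical call, assuming MAX_READ_TRIALS and READ_PAUSE are module-level constants and xtime is the time module imported under an alias:

# hypothetical usage: retry opening a file that lives on a flaky network mount
fh = remoteInputAccess(open, "//server/share/input/settings.xml",
                       "check the path in the settings file")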
Example #3
def welcome():
    register.is_public = True

    form = AddVector()

    vectors = Vectors.query.all()

    settings = Settings.query.first()

    if form.validate_on_submit():
        anki_conf = AnkiConf(email=form.email.data,
                             password=form.password.data,
                             serial=form.vector_serial.data,
                             ip=form.vector_ip.data,
                             name=form.vector_name.data)

        db.session.add(anki_conf)
        db.session.commit()

        conf_path = os.path.join(root_folder, 'configure.py')
        if operating_system() == 'Windows':
            py_cmd = 'py '
        else:
            py_cmd = 'python3 '
        cmd = py_cmd + conf_path
        out = subprocess.run(cmd,
                             stdout=subprocess.PIPE,
                             shell=True,
                             encoding='utf-8')
        flash(str(out.stdout), 'success')

        init_vectors()

        top_vector = Vectors.query.first()
        if form.vector_serial.data == top_vector.serial:
            config = ConfigParser()
            config.read(sdk_config_file)
            config[top_vector.serial]['default'] = 'True'

            with open(sdk_config_file, 'w') as configfile:
                config.write(configfile)

        db.session.query(AnkiConf).delete()
        db.session.commit()
        return redirect(url_for('user_system.register'))

    settings.first_run = False
    db.session.merge(settings)
    db.session.commit()

    return render_template('user/welcome.html',
                           title='Welcome',
                           vectors=vectors,
                           form=form)
Example #4
def test_user_input_combination(config, target_str, os_str):
    """Test for all the valid user input combinations that both the target and
    the operating system match.
    """
    platform = spack.platforms.Test()
    spec_str = "libelf"
    if os_str != "default_os":
        spec_str += " os={0}".format(os_str)
    if target_str != "default_target":
        spec_str += " target={0}".format(target_str)
    spec = spack.spec.Spec(spec_str).concretized()

    assert spec.architecture.os == str(platform.operating_system(os_str))
    assert spec.architecture.target == platform.target(target_str)
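The config, target_str, and os_str arguments above are pytest fixtures. A sketch of how the string fixtures might be parametrized; the real parameter lists live in Spack's own test suite, so these values are placeholders:

import pytest

@pytest.fixture(params=["default_os", "frontend", "backend"])
def os_str(request):
    # placeholder values; Spack's suite defines the real ones
    return request.param

@pytest.fixture(params=["default_target", "x86_64"])
def target_str(request):
    return request.param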
Example #5
File: spd.py Project: skwerlman/SPD
def download_image(link):
    '''
    # Let the user know we are trying to download an image at the given link
    # Prepare the command (wget) to download the image
    # If wget doesn't exist, we raise a FileNotFound error
    # Otherwise, if we're on Windows, modify the wget command to avoid an issue
    #     with GnuWin wget and SSL
    # Finally, we run the constructed command to download the image
    '''

    global args

    # --no-check-certificate is used on windows because GnuWin wget fails to
    #   verify all certificates for some reason

    link = clean_link(link)

    log.i('downloading: ' + link)
    wget = which('wget')
    if wget is None:
        raise WGetNotFoundException('Could not find wget')
    elif operating_system() == 'Windows':
        log.d('on windows, adjusting wget_command')
        wget_command = [wget, '-b', '-N', '-o', 'NUL',
                        '--no-check-certificate', link]
    else:
        wget_command = [wget, '-b', '-N', '-o', '/dev/null', link]

    try:
        while call(wget_command) != 0:
            time.sleep(.05)
            log.d('call is not 0')
            log.i('retrying...')
    except BlockingIOError:
        time.sleep(.1)
        log.d('BlockingIOError!')
        log.w('retrying...')
        download_image(link)
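The BlockingIOError handler above retries by calling download_image recursively, which grows the call stack during a long outage. A sketch of the same retry semantics as a single loop; the helper name is hypothetical:

import time
from subprocess import call

def download_with_retry(wget_command):
    # hypothetical helper: loop instead of recursing on BlockingIOError
    while True:
        try:
            if call(wget_command) == 0:
                return
            time.sleep(.05)
        except BlockingIOError:
            time.sleep(.1)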
Example #6
    def __init__(self):
        self.os_name = operating_system()
        self.current_directory = path.dirname(path.realpath(__file__))

        self.plot_available_funds = False
        self.show_available_funds = True
        self.show_figure = True
        self.save_figure = False

        year = 365.25  # days in a year
        month = year / 12  # average days in a month

        self.middle_column = True

        middle_months = 12
        self.middle_period = middle_months * month
        self.middle_label = "year" if middle_months == 12 else f"{middle_months} months"

        end_months = 3
        self.end_period = end_months * month
        self.end_label = f"{end_months} months"

        config = ConfigParser()
        config.read(SETTINGS_FILE)

        login = config["login"]

        self.username = login["username"]
        self.password = login["password"]

        self.school = login["school"]
        self.friend = login["friend"]
        self.where = login["where"]

        program = config["program"]

        self.chrome_driver_directory = program["chrome_driver_directory"]
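A sketch of the INI layout this constructor appears to expect, inferred from the keys read above; the file name settings.ini and all values are assumptions:

from configparser import ConfigParser

config = ConfigParser()
config["login"] = {
    "username": "me",          # placeholder values throughout
    "password": "secret",
    "school": "Example High",
    "friend": "some_friend",
    "where": "somewhere",
}
config["program"] = {
    "chrome_driver_directory": "/path/to/chromedriver",
}

with open("settings.ini", "w") as fh:  # assumed SETTINGS_FILE target
    config.write(fh)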
Example #7
def manage():
    init_vectors()

    form = AddVector()

    ip_form = ChangeIP()

    vectors = Vectors.query.all()

    if ip_form.validate_on_submit():
        serial = ip_form.serial.data
        config = ConfigParser()
        config.read(sdk_config_file)
        config.set(serial, 'ip', ip_form.new_ip.data)

        with open(sdk_config_file, 'w') as configfile:
            config.write(configfile)
        flash('IP Address updated!', 'success')
        return redirect(url_for('manage_vectors.manage'))

    if form.validate_on_submit():
        anki_conf = AnkiConf(email=form.email.data,
                             password=form.password.data,
                             serial=form.vector_serial.data,
                             ip=form.vector_ip.data,
                             name=form.vector_name.data)

        db.session.add(anki_conf)
        db.session.commit()

        conf_path = os.path.join(root_folder, 'configure.py')
        if operating_system() == 'Windows':
            py_cmd = 'py '
        else:
            py_cmd = 'python3 '
        cmd = py_cmd + conf_path
        out = subprocess.run(cmd,
                             stdout=subprocess.PIPE,
                             shell=True,
                             encoding='utf-8')
        flash(str(out.stdout), 'success')

        init_vectors()

        top_vector = Vectors.query.first()
        if form.vector_serial.data == top_vector.serial:
            config = ConfigParser()
            config.read(sdk_config_file)
            config[top_vector.serial]['default'] = 'True'

            with open(sdk_config_file, 'w') as configfile:
                config.write(configfile)

        db.session.query(AnkiConf).delete()
        db.session.commit()
        return redirect(url_for('manage_vectors.manage'))

    err_msg = get_stats()
    if err_msg:
        flash('No Vector is Connected. Error message: ' + err_msg, 'warning')

    vector_status = Status.query.first()
    return render_template('manage_vectors.html',
                           title='Manage Vectors',
                           form=form,
                           vectors=vectors,
                           sdk_version=sdk_version,
                           vector_status=vector_status,
                           ip_form=ip_form)
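welcome() in Example #3 and manage() above share the configure-and-run block verbatim. A sketch of pulling it into a helper; the function name is hypothetical and sys.executable replaces the per-OS launcher switch:

import os
import subprocess
import sys

def run_configure_script(root_folder):
    # hypothetical helper collapsing the duplicated block in welcome()/manage()
    conf_path = os.path.join(root_folder, 'configure.py')
    out = subprocess.run([sys.executable, conf_path],
                         stdout=subprocess.PIPE,
                         encoding='utf-8')
    return str(out.stdout)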
Example #8
File: spd.py Project: skwerlman/SPD
import os
import re
import time

from platform import system as operating_system

from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from bs4 import BeautifulSoup
from shutil import which
from subprocess import call
from traceback import format_exc
from urllib.error import HTTPError
from urllib.request import Request, urlopen
from zenlog import log

if operating_system() == 'Windows':
    import colorama
    colorama.init()


class ArgumentException(Exception):
    pass


class WGetNotFoundException(Exception):
    pass


def get_web_page(url):
    '''
    # Let the user know we are trying to download a webpage
Example #9
def get_stock_data(tag: str, start_date: dt.datetime, end_date: dt.datetime) -> pd.core.frame.DataFrame:

    # create the output folders, then clear the console once
    for i in ['data', 'img']:
        os.makedirs(i, exist_ok=True)
    os.system('cls' if operating_system() == 'Windows' else 'clear')

    # forces tag into a list
    tag = [tag]
    # attempts to pull the data
    try:
        # get it from yahoo
        data = pdr.get_data_yahoo(tag, start=start_date, end=end_date)
        # the raw CSV header comes back as: Date, Adj Close, Close, High, Low, Open, Volume;
        # the columns are reordered after the header rewrite below

        # write it out in the og format
        data.to_csv(f'data/{tag[0]}.csv')

        # so that it can be read in 
        with open(f'data/{tag[0]}.csv', 'r') as in_file:
            lines = in_file.readlines()
        
        # and manipulated before being exported
        with open(f'data/{tag[0]}.csv', 'w+') as out_file:
            
            lines[0] = lines[0].replace('Attributes', 'Date')
            del lines[1:3]
            
            for i in lines:
                out_file.write(i)
        
        data = pd.read_csv(f'data/{tag[0]}.csv', index_col=0, parse_dates=True)

        data = data[['Open', 'High', 'Low', 'Close', 'Adj Close', 'Volume']]
        # data["str_date"] = data["Date"]
        # data["Date"] = data["Date"].apply(
        #     lambda x: dt.datetime(
        #             *list(
        #                 map(
        #                     int, x.split('-')
        #                 )
        #             )
        #         ).toordinal()
        #     )

        # return the cleaned-up dataframe
        return data

    except Exception as e:
        # if the data is wrong, return a blank one
        print(e)
        return pd.DataFrame()
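A minimal usage sketch, assuming the imports these examples rely on (pandas_datareader as pdr, datetime as dt) are in place; the ticker is a placeholder:

import datetime as dt

df = get_stock_data('AAPL', dt.datetime(2020, 1, 1), dt.datetime.today())
print(df.tail())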
Example #10
def get_predictive_model(tag: str, start_date=pd.to_datetime('2020-01-01'), end_date=dt.datetime.today()):
    # pull in the data via get_stock_data
    df = get_stock_data(tag, start_date, end_date)
    train_dfs = df.copy()

    # Indexing Batches
    train_df = train_dfs.sort_values(by=['Date']).copy()

    # We save a copy of the date index before resetting it to numbers
    date_index = train_df.index

    # Adding Month and Year in separate columns
    d = pd.to_datetime(train_df.index)
    train_df['Month'] = d.strftime("%m") 
    train_df['Year'] = d.strftime("%Y") 

    # We reset the index, so we can convert the date-index to a number-index
    train_df = train_df.reset_index(drop=True).copy()

    FEATURES = ['High', 'Low', 'Open', 'Close', 'Volume', 'Month']

    data = pd.DataFrame(train_df)
    data_filtered = data[FEATURES]

    # We add a prediction column and set dummy values to prepare the data for scaling
    data_filtered_ext = data_filtered.copy()
    data_filtered_ext['Prediction'] = data_filtered_ext['Close'] 

    # Calculate the number of rows in the data
    nrows = data_filtered.shape[0]
    np_data_unscaled = np.array(data_filtered)
    np_data_unscaled = np.reshape(np_data_unscaled, (nrows, -1))
    

    # Transform the data by scaling each feature to a range between 0 and 1
    scaler = RobustScaler()
    np_data = scaler.fit_transform(np_data_unscaled)

    # Creating a separate scaler that works on a single column for scaling predictions
    scaler_pred = RobustScaler()
    df_Close = pd.DataFrame(data_filtered_ext['Close'])
    np_Close_scaled = scaler_pred.fit_transform(df_Close)

    #Settings
    sequence_length = 100

    # Split the training data into x_train and y_train data sets
    # Get the number of rows to train the model on 80% of the data 
    train_data_len = math.ceil(np_data.shape[0] * 0.8) #2616

    # Create the training data
    train_data = np_data[0:train_data_len, :]
    x_train, y_train = [], []

    # The RNN needs data in the shape [samples, time steps, features].
    # Build N samples, each covering sequence_length timesteps over every feature column
    for i in range(sequence_length, train_data_len):
        x_train.append(train_data[i-sequence_length:i, :])  # trailing window of 100 steps
        y_train.append(train_data[i, 0])  # the value to predict
        
    # Convert the x_train and y_train to numpy arrays
    x_train, y_train = np.array(x_train), np.array(y_train)

    # Create the test data
    test_data = np_data[train_data_len - sequence_length:, :]

    # Split the test data into x_test and y_test
    x_test, y_test = [], []
    test_data_len = test_data.shape[0]
    for i in range(sequence_length, test_data_len):
        x_test.append(test_data[i-sequence_length:i,:]) #contains 100 values 0-100 * columns
        y_test.append(test_data[i, 0]) #contains the prediction values for validation
    # Convert x_test and y_test to numpy arrays
    x_test, y_test = np.array(x_test), np.array(y_test)

    # Configure the neural network model
    model = Sequential()

    # One LSTM layer; the input is (x_train.shape[1] timesteps, x_train.shape[2] features)
    n_neurons = x_train.shape[1] * x_train.shape[2]
    
    model.add(LSTM(n_neurons, return_sequences=False, 
                input_shape=(x_train.shape[1], x_train.shape[2]))) 
    model.add(Dense(1, activation='relu'))

    # Compile the model
    model.compile(optimizer='adam', loss='mean_squared_error')

    epochs = 5  
    early_stop = EarlyStopping(monitor='loss', patience=2, verbose=1)
    history = model.fit(x_train, y_train, batch_size=16, 
                        epochs=epochs, callbacks=[early_stop])

    # Get the predicted values
    predictions = model.predict(x_test)


    # Get the predicted values
    pred_unscaled = scaler_pred.inverse_transform(predictions)

    # The date from which onward the data is displayed
    display_start_date = pd.Timestamp('today') - timedelta(days=500)

    # Add the date column
    data_filtered_sub = data_filtered.copy()
    data_filtered_sub['Date'] = date_index

    # Add the difference between the valid and predicted prices
    train = data_filtered_sub[:train_data_len + 1]
    valid = data_filtered_sub[train_data_len:]
    valid.insert(1, "Prediction", pred_unscaled.ravel(), True)
    valid.insert(1, "Difference", valid["Prediction"] - valid["Close"], True)

    # Zoom in to a closer timeframe
    valid = valid[valid['Date'] > display_start_date]
    train = train[train['Date'] > display_start_date]

    # Visualize the data
    plt.subplots(figsize=(10, 8), sharex=True)
    xt = train['Date']; yt = train[["Close"]]
    xv = valid['Date']; yv = valid[["Close", "Prediction"]]
    plt.title("Predictions vs Actual Values", fontsize=20)
    plt.ylabel(tag, fontsize=18)
    plt.plot(xt, yt, color="#039dfc", linewidth=2.0)
    plt.plot(xv, yv["Prediction"], color="#E91D9E", linewidth=2.0)
    plt.plot(xv, yv["Close"], color="black", linewidth=2.0)
    plt.legend(["Train", "Test Predictions", "Actual Values"], loc="upper left")

    # Create the bar plot with the differences
    x = valid['Date']
    y = valid["Difference"]

    # Create custom color range for positive and negative differences
    valid.loc[y >= 0, 'diff_color'] = "#2BC97A"
    valid.loc[y < 0, 'diff_color'] = "#C92B2B"

    plt.bar(x, y, width=0.8, color=valid['diff_color'])
    plt.grid()           

    # os.path.join picks the right separator per OS, replacing the
    # hand-rolled Windows/posix string switches
    save_loc = os.path.join('..', 'img', tag)

    try:
        plt.savefig(save_loc)
        save_loc = os.path.join('static', tag)
        plt.savefig(save_loc)
    except OSError:
        save_loc = os.path.join('img', tag)
        plt.savefig(save_loc)
        save_loc = os.path.join('src', 'static', tag)
        plt.savefig(save_loc)
    
    new_df = df

    d = pd.to_datetime(new_df.index)
    new_df['Month'] = d.strftime("%m") 
    new_df['Year'] = d.strftime("%Y") 
    new_df = new_df.filter(FEATURES)

    # Take the most recent 10% of rows and scale them with the fitted scaler
    last_100_days = new_df[-int(len(new_df) * .1):].values
    last_100_days_scaled = scaler.transform(last_100_days)

    # Collect the scaled window as the single test sample
    X_test_new = []
    X_test_new.append(last_100_days_scaled)

    # Convert the X_test data set to a numpy array and reshape the data
    pred_price_scaled = model.predict(np.array(X_test_new))
    pred_price_unscaled = scaler_pred.inverse_transform(pred_price_scaled)
    values = []
    percentages = []
    for i in range(1,7):
        # Print last price and predicted price for the next day
        price_today = round(new_df['Close'][-1], 2)
        predicted_price = round(pred_price_unscaled.ravel()[0], i + 2)
        percent = round(100 - (predicted_price * 100)/price_today, 2)

        a = '+'
        if percent > 0:
            a = '-'

        values.append(predicted_price)
        percentages.append(percent)

    expected = f'The average predicted close price after a week is {round(average(values), 1)}({a}{average(percentages)}%)'
    print(f'finished {tag}')
    try:
        return url_for("static", filename=f'{tag}.png'), expected
    except:
        return save_loc, expected
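A hypothetical call; url_for only resolves inside a Flask request context, so outside one the function falls back to returning the raw save_loc:

# placeholder ticker; returns (image location, summary string)
img_loc, expected = get_predictive_model('AAPL')
print(expected)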
Example #11
def get_predictive_model(tag: str, start_date=pd.to_datetime('2020-01-01'), end_date=dt.datetime.today(), locs=[-2, [2, 6]]):
    # pull in the data via get_stock_data
    df = get_stock_data(tag, start_date, end_date)


    # index the frame by its date column (assumes a "Date" column is present)
    df.index = df["Date"]

    # parse through for the trimmed dataset   
    plt.style.use('ggplot')
    X = df.iloc[:, :locs[0]]
    y = df.iloc[:, locs[1]]

    mm = MinMaxScaler()
    ss = StandardScaler()

    X_ss = ss.fit_transform(X)
    y_mm = mm.fit_transform(y)

    size = int(len(X_ss) * .2)  # training split size (replaces the fixed 200 rows)

    X_train = X_ss[:size, :]
    X_test = X_ss[size:, :]

    y_train = y_mm[:size, :]
    y_test = y_mm[size:, :]

    X_train_tensors = Variable(torch.Tensor(X_train))
    X_test_tensors = Variable(torch.Tensor(X_test))

    y_train_tensors = Variable(torch.Tensor(y_train))
    y_test_tensors = Variable(torch.Tensor(y_test))

    X_train_tensors_final = torch.reshape(X_train_tensors,   (X_train_tensors.shape[0], 1, X_train_tensors.shape[1]))
    X_test_tensors_final = torch.reshape(X_test_tensors,  (X_test_tensors.shape[0], 1, X_test_tensors.shape[1])) 

    num_epochs = 1000 #1000 epochs
    learning_rate = 0.001 #0.001 lr

    input_size = 5 #number of features
    hidden_size = 2 #number of features in hidden state
    num_layers = 1 #number of stacked lstm layers

    num_classes = 1 #number of output classes 

    lstm1 = LSTM1(num_classes, input_size, hidden_size, num_layers, X_train_tensors_final.shape[1]) #our lstm class 

    criterion = torch.nn.MSELoss()    # mean-squared error for regression
    optimizer = torch.optim.Adam(lstm1.parameters(), lr=learning_rate)    

    for epoch in range(num_epochs):
        outputs = lstm1.forward(X_train_tensors_final)  # forward pass
        optimizer.zero_grad()  # reset the gradients before backprop

        # obtain the loss
        loss = criterion(outputs, y_train_tensors)

        loss.backward()  # compute gradients of the loss w.r.t. the weights
        optimizer.step()  # update the weights

        if epoch % 100 == 0:
            print("Epoch: %d, loss: %1.5f" % (epoch, loss.item()))

    df_X_ss = ss.transform(df.iloc[:, :-2])  # reuse the scalers fitted above
    df_y_mm = mm.transform(df.iloc[:, -2:])

    df_X_ss = Variable(torch.Tensor(df_X_ss)) #converting to Tensors
    df_y_mm = Variable(torch.Tensor(df_y_mm))
    #reshaping the dataset
    df_X_ss = torch.reshape(df_X_ss, (df_X_ss.shape[0], 1, df_X_ss.shape[1])) 

    train_predict = lstm1(df_X_ss)#forward pass
    data_predict = train_predict.data.numpy() #numpy conversion
    dataY_plot = df_y_mm.data.numpy()

    try:
        data_predict = mm.inverse_transform(data_predict)  # reverse the scaling
    except Exception:
        pass

    dataY_plot = mm.inverse_transform(dataY_plot)
    plt.figure(figsize=(10,6)) #plotting
    plt.axvline(x=200, c='r', linestyle='--') #size of the training set

    plt.plot(dataY_plot, label='Actual Data') #actual plot
    plt.plot(data_predict, label='Predicted Data') #predicted plot
    plt.title('Time-Series Prediction')
    plt.legend()
    

    save_loc = os.path.join('..', 'img', tag)

    try:
        plt.savefig(save_loc)
        plt.savefig(os.path.join('static', tag))
    except OSError:
        save_loc = os.path.join('img', tag)
        plt.savefig(save_loc)
        plt.savefig(os.path.join('src', 'static', tag))
    
    try:
        return url_for("static", filename=f'{tag}.png')
    except:
        return save_loc
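The LSTM1 class instantiated above is not part of this listing. A minimal reconstruction of such a PyTorch wrapper, assuming the constructor signature (num_classes, input_size, hidden_size, num_layers, seq_length) used in the call; the real class may differ:

import torch
import torch.nn as nn

class LSTM1(nn.Module):
    # hypothetical reconstruction: the real class is not shown in this listing
    def __init__(self, num_classes, input_size, hidden_size, num_layers, seq_length):
        super().__init__()
        self.num_layers = num_layers
        self.hidden_size = hidden_size
        self.lstm = nn.LSTM(input_size=input_size, hidden_size=hidden_size,
                            num_layers=num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        # zero initial hidden and cell states, one per layer
        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size)
        c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size)
        out, _ = self.lstm(x, (h0, c0))
        return self.fc(out[:, -1, :])  # predict from the last timestep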