def main():
    # Build the DataFrame with all the data
    df = prep_df()

    # Keep a history of costs and parameters
    cost_hist = []
    parameters_hist = []

    # Initialize the graph (with default edges)
    initial_graph, initial_parameters = generate_graph(df)

    # Cost of the initial graph
    g = m.run_model(initial_graph)
    old_cost = m.calc_error(g)

    # Initial parameters
    # parameters = [0.5]

    cost_hist.append(old_cost)
    parameters_hist.append(initial_parameters)

    T = 1.0
    T_min = 0.001
    alpha = 0.9  # cooling rate
    parameters = initial_parameters

    while T > T_min:
        print('Temp:', T)

        # 100 proposals per temperature step
        for _ in range(100):
            new_parameters = list(neighbor(parameters))
            # build and evaluate the candidate without committing it yet
            graph, new_parameters = generate_graph(df, new_parameters)
            g = m.run_model(graph)
            new_cost = m.calc_error(g)

            if new_cost < old_cost:
                # always accept an improvement
                parameters = new_parameters[:]
                parameters_hist.append(parameters)
                old_cost = new_cost
                cost_hist.append(old_cost)
            else:
                # occasionally accept a worse solution; the probability
                # shrinks as the temperature cools
                ap = acceptance_probability(old_cost, new_cost, T)
                if ap > random():
                    parameters = new_parameters[:]
                    parameters_hist.append(parameters)
                    old_cost = new_cost
                    cost_hist.append(old_cost)
        T = T * alpha

    plot_results(parameters, cost_hist, parameters_hist)

    return parameters, cost_hist, parameters_hist
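
# The neighbor and acceptance_probability helpers are not shown above; a
# minimal sketch of the standard simulated-annealing forms they appear to
# implement (the Gaussian step size is an assumption, not the original code):
import math
from random import gauss, random


def neighbor(parameters):
    # perturb each parameter with small Gaussian noise; 0.1 is assumed
    return [p + gauss(0, 0.1) for p in parameters]


def acceptance_probability(old_cost, new_cost, T):
    # Metropolis criterion: a worse solution is accepted with probability
    # exp(-(new_cost - old_cost) / T), which shrinks as T cools
    return math.exp((old_cost - new_cost) / T)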
Example #2
def trainmodel():
    try:
        run_model()
        resp = jsonify(success=True)
        resp.status_code = 200
        return resp
    except Exception:
        resp = jsonify(success=False)
        resp.status_code = 500
        return resp
Example #3
def trainmodel():
    try:
        nb_epoch = int(request.args['nb_epoch'])
        run_model(nb_epochs=nb_epoch)
        resp = jsonify(success=True)
        resp.status_code = 200
        return resp
    except Exception:
        resp = jsonify(success=False)
        resp.status_code = 500
        return resp
Example #4
def process_project(wdir, p, cfg):
    # if h.verbosity == h.LOG_DEBUG:
    # print(p)
    data = p['data']

    # Branches based on type of quantification specified in the .config file
    if cfg['MODEL']['Type'] == 'Stability':
        ms.run_model(wdir, data, cfg)
    elif cfg['MODEL']['Type'] == 'Relative':
        mr.run_model(wdir, data, cfg)
    elif cfg['MODEL']['Type'] == 'Absolute':
        m.run_model(wdir, data, cfg)
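
# The cfg mapping is read from a .config file; a minimal sketch of the
# expected section (section/key names come from the code above, the value
# is an assumption showing one of the three handled types):
import configparser

cfg = configparser.ConfigParser()
cfg.read_string("""
[MODEL]
Type = Stability
""")
assert cfg['MODEL']['Type'] in ('Stability', 'Relative', 'Absolute')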
Example #5
def home():
    createTablesWhenNecessary()

    if flask.request.args.get('pricesLeftInPercent') is None:
        return flask.jsonify(error='PARAMETER_NOT_GIVEN',
                             parameter='pricesLeftInPercent')

    if flask.request.args.get('timePassedInPercent') is None:
        return flask.jsonify(error='PARAMETER_NOT_GIVEN',
                             parameter='timePassedInPercent')

    prices_left_in_percent = float(
        flask.request.args.get('pricesLeftInPercent'))
    time_passed_in_percent = float(
        flask.request.args.get('timePassedInPercent'))

    prices_give_out_tendency_in_percent = get_current_prices_give_out_tendency_in_percent()

    give_out_price = model.run_model(time_passed_in_percent,
                                     prices_left_in_percent,
                                     prices_give_out_tendency_in_percent)
    addParticipationToTable(1 if give_out_price else 0)

    return flask.jsonify(
        error='',
        give_out_price=give_out_price,
        prices_give_out_tendency_in_percent=prices_give_out_tendency_in_percent,
        prices_left_in_percent=prices_left_in_percent,
        time_passed_in_percent=time_passed_in_percent)
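
# A hypothetical request against this endpoint, assuming the Flask app object
# is named `app` and the view is routed at '/' (neither is shown above):
#
#     with app.test_client() as client:
#         resp = client.get('/?pricesLeftInPercent=40&timePassedInPercent=60')
#         print(resp.get_json())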
Example #6
def main(args):
    # Unitprot fasta sequence to train ProtVec model
    fastaFile = "data/fasta/uniprot_sprot.fasta"
    processedDataFile = "data/processedData/" + str(
        args.len) + "seq_embeddings_" + args.seqtype + ".pkl"

    # if biovec is not trained, train the model. Otherwise, load the preprocessed data
    if not isfile(processedDataFile):
        if not isfile("data/embeddings/" + str(args.len) +
                      "grams_embeddings.txt"):
            print("Generating embeddings for subsequences of length " +
                  str(args.len) + ".")
            run_biovec(fastaFile, args)
        else:
            print("Using pre-generated embeddings")

    # each organism names a folder inside data/ that holds a test-sequence fasta file
    organism = ["eDBD_195"]
    for org in organism:
        print(org)
        folder_name = "data/" + org + "/"
        if not os.path.exists(folder_name):
            os.makedirs(folder_name)

        test_fasta = folder_name + "seq.fasta"
        test_output = folder_name + "testSequence_" + args.seqtype + ".csv"
        test_df = parse_fasta_sequences(test_fasta, test_output)
        preprocess_sequences(processedDataFile,
                             args,
                             test_data_file=test_output)

        print("Running model")
        classes = [
            'end_preference', 'periodic_preference', 'groove_preference',
            'dyad_preference', 'gyre_spanning', 'orientational_preference',
            'nucleosome_stability'
        ]
        if args.binding == -1:
            selected_classes = np.arange(len(classes))
        else:
            selected_classes = np.asarray([args.binding])
        print("Training for ", list(map(classes.__getitem__,
                                        selected_classes)))

        run_model(org, folder_name, processedDataFile, args, classes,
                  selected_classes)
Example #7
def run_inference():
    # decode the raw request body into an OpenCV image
    img = np.frombuffer(request.get_data(), np.uint8)
    img = cv2.imdecode(img, cv2.IMREAD_COLOR)
    cv2.imwrite("Image.jpg", img)
    seg_map = run_model(graph, img)
    action = inference(seg_map)

    return action
Example #8
def runTest(data_ids, data_conflicts, team_size, test_name="Test"):

    # Calculate number of students and teams
    num_students = len(data_ids)
    num_teams = num_students // team_size + min(1, num_students % team_size)

    # Map conflicts to index pairs, skipping inverse duplicates
    conflicts = []
    ids = [row[0] for row in data_ids]
    for i in range(len(data_conflicts)):
        conflict = [
            ids.index(data_conflicts[i][0]),
            ids.index(data_conflicts[i][3])
        ]
        inverse_conflict = [
            ids.index(data_conflicts[i][3]),
            ids.index(data_conflicts[i][0])
        ]
        if inverse_conflict not in conflicts:
            conflicts.append(conflict)

    # Get gpa vector
    gpas = [int(round(float(row[4]))) for row in data_ids]

    # Run model
    raw_output = run_model(num_students, num_teams, team_size, conflicts, gpas)

    output = {"model": {"hasConflicts": raw_output["hasConflicts"]}}

    # Create empty teams
    output['teams'] = {}
    for team in range(num_teams):
        output['teams'][team + 1] = {'members': []}

    # Fill teams with assignments
    for i, a in enumerate(raw_output["assignments"]):
        output['teams'][a + 1]["members"].append(data_ids[i][0])

    # Create people
    output['people'] = {}
    for i in range(len(data_ids)):
        output['people'][data_ids[i][0]] = {
            'id': data_ids[i][0],
            'firstName': data_ids[i][1],
            'lastName': data_ids[i][2],
            'email': data_ids[i][3],
            'gpa': round(float(data_ids[i][4])),
            'conflicts': []
        }
        for j in range(len(data_conflicts)):
            if data_conflicts[j][0] == data_ids[i][0]:
                output['people'][data_ids[i][0]]['conflicts'].append(
                    data_conflicts[j][3])

    # Create test report file
    getTestReport(output, data_ids, data_conflicts, num_teams, team_size,
                  test_name)
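
# The num_teams expression above is ceiling division, e.g. 10 students in
# teams of 3 give 10 // 3 + min(1, 10 % 3) = 3 + 1 = 4 teams:
assert 10 // 3 + min(1, 10 % 3) == 4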
Example #9
def create_musicxml(path, measure_length, key_number):
    """
    This function takes the path to an uploaded file, its measure_length, and its key number
    (usually info inputted by user) and passes the image through the first neural net to extract the measures,
    then passes each measure through the second neural net to convert it to xml.
    
    The handle_page function covers the first part and the run_model function covers the second.
    """
    handle_page(path, measure_length, key_number,
                os.path.join(MEDIA_ROOT, 'current_measures'))
    measures = []

    # initialize the xml output
    soup = BeautifulSoup(features='xml')
    score_partwise = soup.new_tag('score-partwise', version='3.1')
    part_list = soup.new_tag('part-list')
    score_part = soup.new_tag('score-part', id='P1')
    part_name = soup.new_tag('part-name')
    soup.append(score_partwise)
    score_partwise.append(part_list)
    part_list.append(score_part)
    score_part.append(part_name)
    part_name.append('Piano')
    part = soup.new_tag('part', id='P1')
    score_partwise.append(part)

    # loop through each extracted measure and convert it to xml
    # if the conversion fails, return a blank measure
    for i in range(
            len(os.listdir(os.path.join(MEDIA_ROOT, 'current_measures')))):
        print('handling measure ', i + 1)
        measure_soup = run_model(
            os.path.join(MEDIA_ROOT, 'current_measures', f'subimage{i}.png'),
            measure_length, key_number)
        if measure_soup:
            measure = measure_soup.find('measure')
            # only need the key and time sig info on the first measure
            if i != 0:
                attributes = measure.find('attributes')
                attributes.extract()
            measures.append(measure)
            print(f'measure {i+1} successful')
        else:
            blank_measure = get_blank_measure(measure_length)
            measures.append(blank_measure)
            print('error in measure ', i + 1)
    for measure in measures:
        part.append(measure)

    # pick a random filename for the output
    filename = np.random.choice(list('abcdefghijklmnopqrstuvwxyz0123456789'),
                                size=16)
    filename = ''.join(filename)
    with open(os.path.join(MEDIA_ROOT, f'{filename}.musicxml'), 'w+') as f:
        f.write(str(soup))
    return filename
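
# Hypothetical call, mirroring the arguments page_upload below passes to
# run_model (the path is made up; 16 and 0 are the measure length and key
# number used there):
#
#     name = create_musicxml('media/uploaded_page.png', 16, 0)
#     print(f'{name}.musicxml written to MEDIA_ROOT')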
Example #10
def plot_png(path):
    plt.cla()  # don't create fig and ax here, since Matplotlib keeps them alive

    N, t_min, t_max, R_0, I_0, R0, D, y_max, t0_date = params()

    t, S, I, R = run_model(R0, D, N, I_0, R_0, t_min, t_max)
    plot(ax, t, S, I, R, t0_date, y_max)

    output = io.BytesIO()
    FigureCanvasAgg(fig).print_png(output)
    return Response(output.getvalue(), mimetype='image/png')
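
# plot_png() reuses a module-level figure; a minimal sketch of the setup it
# assumes (the Agg backend is an assumption for a headless web server):
import matplotlib
matplotlib.use('Agg')
from matplotlib.backends.backend_agg import FigureCanvasAgg
import matplotlib.pyplot as plt

fig, ax = plt.subplots()  # created once; plt.cla() clears ax on each request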
Example #11
def main():
    args = parse_args()
    adj, edges, feats, labels = load_data(args.input)
    embeddings = run_model(edges, adj, feats, args.lamb, args.eta, args.dimension, args.learning_rate, args.epoch,
                           gpu_fraction=args.gpu_fraction,
                           batchsize=args.batchsize,
                           print_every_epoch=args.print_every_epoch,
                           scope_name='default')
    # save the embeddings
    np.savetxt(args.output, embeddings, delimiter=',')

    # performance evaluation
    indice = np.random.permutation(labels.shape[0])
    acc = node_classification(embeddings, labels, indice)
    print("Node classification: ACC={}".format(acc))
Example #12
def measure_upload(request):
    if request.method == 'POST':
        form = UploadedMeasureForm(request.POST, request.FILES)
        if form.is_valid():
            file = form.save()
            path = 'media/' + str(file.measure)
            key_number = int(str(file.key))
            measure_length = int(str(file.time_signature))
            s = run_model(path, measure_length, key_number)
            with open('media/output.musicxml', 'w+') as f:
                f.write(s)
            return HttpResponseRedirect(reverse('measure_output'))
    elif request.method == 'GET':
        form = UploadedMeasureForm()
    context = {'form': form}
    return render(request, 'measure_upload.html', context)
Example #13
def page_upload(request):
    if request.method == 'POST':
        form = UploadedPageForm(request.POST, request.FILES)
        if form.is_valid():
            file = form.save()
            path = 'media/' + str(file.page)
            key_number = int(str(file.key))
            measure_length = int(str(file.time_signature))
            handle_page(str(file.page), measure_length, key_number,
                        'media/current_measures/')

            measures = []
            soup = BeautifulSoup(features='xml')
            score_partwise = soup.new_tag('score-partwise', version='3.1')
            part_list = soup.new_tag('part-list')
            score_part = soup.new_tag('score-part', id='P1')
            part_name = soup.new_tag('part-name')
            soup.append(score_partwise)
            score_partwise.append(part_list)
            part_list.append(score_part)
            score_part.append(part_name)
            part_name.append('Piano')
            part = soup.new_tag('part', id='P1')
            score_partwise.append(part)

            os.chdir('..')
            for i in range(len(os.listdir('./media/current_measures/'))):
                measure_soup = run_model(
                    f'./media/current_measures/subimage{i}.png', 16, 0)
                measure = measure_soup.find('measure')
                if i != 0:
                    attributes = measure.find('attributes')
                    attributes.extract()
                measures.append(measure)

            # append the measures once, after every one has been converted
            for measure in measures:
                part.append(measure)

            with open('media/output_musicxml.musicxml', 'w+') as f:
                f.write(str(soup))

            return HttpResponseRedirect('media/output_musicxml.musicxml')
    elif request.method == 'GET':
        form = UploadedPageForm()
    context = {'form': form}
    return render(request, 'page_upload.html', context)
Example #14
def generate_teams(model_input, model_dict):

    print("!! run model")

    result = run_model(num_students=model_input["num_students"],
                       num_teams=model_input["num_teams"],
                       team_size=model_input["team_size"],
                       conflicts=model_input["conflicts"],
                       gpas=model_input["gpas"],
                       ifgpa=model_input["ifgpa"] == "true")

    print("!! model finished")

    print(result)

    model_dict["status"] = "finished"
    model_dict["result"] = result

    print("!! process finished")
Example #15
def main():
    rmtree('out')
    makedirs('out')

    parser = argparse.ArgumentParser()
    parser.add_argument('content_image')
    parser.add_argument('style_image')
    args = parser.parse_args()

    content_input = matrix_from_image_file(args.content_image, 0.1)
    style_input = matrix_from_image_file(args.style_image)

    model = build_model(layers, content_input, style_input)
    wp = WeightProvider()

    for i in range(10000):
        content_weights, style_weights = wp.get_weights()

        print('Using weights:\n{}\n{}'.format(content_weights, style_weights))
        outfile = f'out/run_{i:0>3}.png'

        rel_content_weights = [
            w * CONTENT_WEIGHT_MULTIPLIER for w in content_weights
        ]

        img, losses = run_model(model, RUNS_PER_EPOCH, content_input.shape,
                                rel_content_weights, style_weights)
        img.save(outfile)

        r = {
            'img': outfile,
            'layers': package_layers(
                zip(layers, content_weights, losses[:8], style_weights,
                    losses[8:])),
            'parts': ['content', 'style'],
        }

        with open(RESULTS_FILE, 'w') as fh:
            dump(r, fh)
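
# WeightProvider is not shown; a minimal sketch of the interface main() relies
# on (constant weights here; eight layers are assumed from the losses[:8] /
# losses[8:] split above):
class WeightProvider:
    def get_weights(self):
        # one content weight and one style weight per layer
        content_weights = [1.0] * 8
        style_weights = [1.0] * 8
        return content_weights, style_weights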
Example #16
def menu():
    value = 0
    if request.method == "POST":
        req = request.form
        age = req.get('age')
        height = req.get('height')
        weight = req.get('weight')
        budget = req.get('budget')
        allergies = req.get('allergies')
        ingredients = req.get('ingredients')
        sex = req.get('sex')
        activity = req.get('activity')
        breakfast, lunch, dinner = run_model(int(age), int(weight),
                                             float(height), int(budget),
                                             ingredients, allergies, activity,
                                             sex)
        value = 1
    if value == 1:
        return render_template(
            'menu.html',
            breakfast_title=breakfast['Title'],
            price_b=breakfast['Price'],
            fat_b=breakfast['Fat'],
            carbs_b=breakfast['Carbs'],
            protein_b=breakfast['Protein'],
            calories_b=breakfast["Calories"],
            lunch_title=lunch['Title'],
            price_l=lunch['Price'],
            fat_l=lunch['Fat'],
            carbs_l=lunch['Carbs'],
            protein_l=lunch['Protein'],
            calories_l=lunch["Calories"],
            dinner_title=dinner['Title'],
            price_d=dinner['Price'],
            fat_d=dinner['Fat'],
            carbs_d=dinner['Carbs'],
            protein_d=dinner['Protein'],
            calories_d=dinner["Calories"],
        )
    else:
        return render_template('menu.html')
Example #17
import pandas as pd
import numpy as np
import os
os.chdir('C:/Users/ALLEN/Desktop/PythonTest/HousePrice/')
import preprocess as pre
import model
# relative paths work because of the os.chdir above
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')
x_train, y_train, test, test_ID = pre.input_data(train, test)
model.run_model(x_train, y_train, test, test_ID, 'nn')
Example #18
def upload(userid):

    # if the page is sending a post request
    if request.method == 'POST':
        # check if the post request has the file part
        if 'file[]' not in request.files:
            # if there is no file at all then return this
            return 'didnt work'
        # otherwise retrieve the file
        print('before request')
        file = request.files.getlist('file[]')
        print('after request')
        print(file)
        # if the file name is blank then basically no file was selected.
        # return back to the same page
        # if file.filename == '':
        #     return redirect('/upload/' + str(userid))

        # if the file path is satisfactory then

        # get the current date and time
        now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")

        small_images = []
        for image in file:
            print(image)
            print(image.filename)

            # if the filename is nothing then there is no file
            # if so, redirect back to the same page.
            if image.filename == '':
                return redirect('/upload/%r' % userid)
            if file:

                # open the image using pillow
                img = Image.open(image)
                # find its dimensions
                width, height = img.size
                # if the file is not standard size
                if img.size != (512, 512):
                    if width < 300 or height < 300:
                        # record the undersized image and skip it
                        small_images.append(str(image))
                        continue
                    print(img.size)
                    # then either resize the image or...
                    img = img.resize((512, 512))

                    # Crop the center of the image
                    # new_width, new_height = (512, 512)
                    # left = (width - new_width) / 2
                    # top = (height - new_height) / 2
                    # right = (width + new_width) / 2
                    # bottom = (height + new_height) / 2

                    # img = img.crop((left, top, right, bottom))

                # if the image is too small then say it is too small
                elif width < 256 or height < 256:
                    # we will deal with this later
                    print(img.size, 'too small')

                # initialise file path to nothing
                path = ''
                # if the user is signed in as a guest
                if userid == 0:
                    # use the standard file path, overwrite the original if it exists.
                    path = '/static/images/guestimage.png'
                else:
                    # otherwise if the user is signed in

                    # count how many images the user has already entered;
                    # these should be saved separately and not overwritten
                    images = Images.query.filter(
                        Images.user_id.like(userid)).all()
                    count = len(images)

                    # create a unique file name based on the user and how many images they have already entered.
                    # this allows us to recreate the path easily in a way that is user specific
                    path = '/static/images/%s_%s_image.png' % (str(userid),
                                                               count)

                # save the image
                img.save('.' + path)

                # run the image through the model
                output = run_model('.' + path)
                print(output)

                # create a new image object to store
                new_image = Images(
                    user_id=userid,
                    image_path=path,
                    # default scan image and notes to nothing for now
                    # scan_image_path=str(output['scan_img']),
                    # note: rstrip('.png') would strip characters, not the suffix
                    scan_image_path=path[:-len('.png')] + 'scan.png',
                    cancer_class=int(output['cancer_class']),
                    notes=str(output),
                    date_time_added=now)

                # add the image to the db
                db.session.add(new_image)
                db.session.commit()

                print(path)

            # return the show_image page to display the image the person just uploaded
            # return render_template('show_image.html',
            #     userid=str(userid),
            #     # pass in the new image path
            #     image_path=path,
            #     )

        # if there are any undersized images, render this message html
        if small_images:
            return render_template('message_sent.html',
                                   user=Users.query.get_or_404(userid),
                                   message='an image was too small',
                                   images=small_images)
        return redirect(url_for('.show_image', userid=userid, timestamp=now))

    # if request is not post, render upload page
    return render_template('/upload.html', userid=userid)
Example #19
def main():
    tickers = pd.read_csv("./Nasdaq_Screeners.csv")["Symbol"].tolist()

    ## ask the user for the ticker to be analyzed
    while True:
        print("Type symbol of stock to be analyzed: ")
        user_input = input()

        if user_input.upper() in tickers:
            print("Ticker validation successful")
            ts = TimeSeries(key="OS6T161LRRJ4V9N2", output_format="pandas")
            data, meta_data = ts.get_daily_adjusted(
                symbol=user_input, outputsize="full"
            )
            break

        print("Invalid tickerv: try different ticker")

    ## Variables
    #### Need to edit this part

    train_forward = 0
    n_training = 2000
    timesteps = 0
    repeat = 1

    all_data = np.zeros((10, 10, repeat))
    final_data = np.zeros((10, 10))

    for k in range(1):  ##change to x axis
        for j in range(1):  ## change to y axis

            timesteps = 10 * (j + 1)
            forecast_days = 10 * (k + 1)
            n_testing = timesteps + 70
            temp = np.zeros(repeat)

            for i in range(repeat):
                train_forward = 500 * i + 500

                x_data = dt.to_techin(data, forecast_days)
                y_data = dt.to_updown(data, forecast_days, len(x_data))

                toolbox = pp.pptoolbox(
                    x_data, train_forward, n_training, n_testing, timesteps
                )

                x_train = toolbox.x_train()
                x_test = toolbox.x_test()
                y_train = toolbox.y_train(y_data)
                y_test = toolbox.y_test(y_data)

                temp[i] = md.run_model(timesteps, x_train, y_train, x_test, y_test)
                all_data[k, j, i] = temp[i]
                final_data[k, j] = st.median(temp)

    # give final_data to plot.py
    # give the user a statistical report

    test_data = [
        [36.0, 46.0, 54.0, 58.5, 24.5, 57.5, 63.0, 48.0, 52.0, 58.5],
        [52.0, 43.5, 54.5, 49.5, 44.0, 45.0, 58.5, 38.0, 69.0, 59.5],
        [33.5, 35.0, 56.0, 47.0, 61.0, 49.0, 63.0, 56.0, 57.5, 49.5],
        [33.0, 44.5, 48.5, 53.5, 49.5, 59.5, 64.0, 62.5, 49.5, 42.5],
        [40.0, 41.0, 53.0, 46.0, 55.0, 59.0, 46.5, 56.0, 51.0, 52.0],
        [28.0, 39.5, 53.5, 57.0, 60.5, 59.5, 58.0, 57.0, 69.0, 57.0],
        [32.5, 44.0, 42.5, 57.0, 59.5, 56.5, 61.0, 56.0, 66.0, 65.5],
        [34.0, 31.0, 47.0, 52.5, 56.5, 60.0, 63.0, 37.0, 52.5, 62.0],
        [27.0, 41.5, 56.0, 54.5, 57.0, 41.0, 61.0, 51.5, 56.5, 65.0],
        [28.0, 35.0, 50.0, 50.0, 60.0, 60.0, 45.0, 52.5, 64.5, 56.0],
    ]
    final_data = np.array(test_data)
    dt.report(final_data, user_input)
    plt.plot(100, 100, final_data)
Example #20
import sampling
import model

sampling.Cap()
model.run_model()
Example #21
def analyzeImage():
    # Validate Request
    data = request.form
    imageBatchUploadId = data.get('imageBatchUploadId', None)
    if imageBatchUploadId is None:
        return (jsonify({'ok': False, 'message': 'Bad request'}), 400)
    print("/analyze: Request Validation successful")

    # Get all images for filename
    images = mongo.db.images.find({'imageBatchUploadId': imageBatchUploadId})
    scores = []
    for image in images:
        print("/analyze: " + image['imageFileName'])
        # Get image by filename
        filename = image['imageFileName']
        storage = gridfs.GridFS(mongo.db)
        out = storage.get_version(filename)
        image_bytes = out.read()
        img, original_format = bytes_to_image(image_bytes)

        image_list = [(filename, img)]

        # Run model
        results_dict = model.run_model(image_list)
        model_output_scores = results_dict[filename]['scores']
        scores = []
        for output in model_output_scores:
            if "N/A" not in output:
                scores.append(output)
        classifiedImage = Image.fromarray(
            results_dict[filename]['classifiedImage'])
        print("/analyze: Score = ", scores)
        if len(scores) == 0:
            print("Removing")
            mongo.db.images.delete_many({'imageFileName': filename})
        else:
            # Update mongo document about this image with classification results
            classified_filename = filename + "-classified"
            mongo.db.images.update_one({'imageFileName': filename}, {
                '$set': {
                    'classifiedImageFileName': classified_filename,
                    'scores': scores
                }
            })
            # Save the new image
            output = io.BytesIO()
            classifiedImage.save(output, format=original_format)
            storage.put(output.getvalue(),
                        filename=classified_filename,
                        content_type="image/png")

    if all(score is not None for score in scores):
        response = jsonify({'status': 200, 'ok': True})
        response.headers.add('Access-Control-Allow-Origin', '*')
        return response
    else:
        response = jsonify({
            'ok': False,
            'message': 'Internal Error in classification model'
        })
        response.headers.add('Access-Control-Allow-Origin', '*')
        return response
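
# bytes_to_image is not shown; a minimal sketch of what it appears to do,
# assuming Pillow (returns the decoded image and its original format so the
# classified result can be re-encoded the same way):
import io
import numpy as np
from PIL import Image


def bytes_to_image(image_bytes):
    pil_img = Image.open(io.BytesIO(image_bytes))
    return np.array(pil_img), pil_img.format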
Example #22
def results():

    time_start = time.time()

    # get params
    params = parse_params(request)
    app.logger.info(f'... params = {params}')
    highlight_before = params['highlight_before']
    highlight_after = params['highlight_after']
    baseline_before = params['baseline_before']
    baseline_after = params['baseline_after']
    return_type = params['return_type']
    remote_host = params['remote_host']
    local_host = params['local_host']
    model = params['model']
    score_thold = params['score_thold']
    model_level = model.get('model_level', 'dim')

    # get charts to pull data for
    charts = get_chart_list(host=remote_host)

    # get data
    df = get_data(remote_host, charts, after=baseline_after, before=highlight_before,
                  diff=True, ffill=True, numeric_only=True, nunique_thold=0.05)
    colnames = list(df.columns)
    arr_baseline = df.query(f'{baseline_after} <= time_idx <= {baseline_before}').values
    arr_highlight = df.query(f'{highlight_after} <= time_idx <= {highlight_before}').values
    charts = list(set([col.split('|')[0] for col in colnames]))
    app.logger.info(f'... len(charts) = {len(charts)}')
    app.logger.info(f'... len(colnames) = {len(colnames)}')
    app.logger.info(f'... arr_baseline.shape = {arr_baseline.shape}')
    app.logger.info(f'... arr_highlight.shape = {arr_highlight.shape}')
    time_got_data = time.time()
    app.logger.info(f'... time start to data = {time_got_data - time_start}')

    # get scores
    results_dict = run_model(model, colnames, arr_baseline, arr_highlight)

    time_got_scores = time.time()
    app.logger.info(f'... time data to scores = {round(time_got_scores - time_got_data, 2)}')

    # get results to df
    df_results = results_to_df(results_dict, score_thold)

    time_done = time.time()
    app.logger.info(f'... time total = {round(time_done - time_start, 2)}')

    # build response
    if return_type == 'html':
        charts = df_results['chart'].values.tolist()
        counts = OrderedDict(Counter([c.split('.')[0] for c in charts]).most_common())
        counts = ' | '.join([f"{c}:{counts[c]}" for c in counts])
        summary_text = f"number of charts = {df_results['chart'].nunique()}, number of dimensions = {len(df_results)}, {counts}"
        charts_to_render = []
        for chart in df_results['chart'].unique():
            df_results_chart = df_results[df_results['chart'] == chart]
            dimensions = ','.join(df_results_chart['dimension'].values.tolist())
            rank = df_results_chart['chart_rank'].unique().tolist()[0]
            score_avg = round(df_results_chart['score'].mean(), 2)
            score_min = round(df_results_chart['score'].min(), 2)
            score_max = round(df_results_chart['score'].max(), 2)
            charts_to_render.append(
                {
                    "id": chart,
                    "title": f"{rank} - {chart} - score_avg = {score_avg}, score_min = {score_min}, score_max = {score_max}",
                    "after": baseline_after,
                    "before": highlight_before,
                    "data_host": "http://" + f"{remote_host.replace('127.0.0.1', local_host)}/".replace('//', '/'),
                    "dimensions": dimensions
                }
            )
        return render_template(
            'results.html', charts=charts_to_render, highlight_after=highlight_after*1000,
            highlight_before=highlight_before*1000, summary_text=summary_text
        )
    elif return_type == 'json':
        return jsonify(df_results.to_dict(orient='records'))
    else:
        return None
Example #23
def rerun_model():
    run_model()
    return "completed running model"
Example #24
def run(create_df=False):
    if create_df:
        scrape(start=20)
    run_model()
Example #25
def estimate():
    data = request.get_json()
    result = run_model(data["data"])
    return jsonify(data=result)
Example #26
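# This example starts mid-script; a sketch of the setup it assumes (the names
# come from the code below, every value is an assumption, and helpers such as
# get_chart_list, get_data, run_model, and results_to_df are imported elsewhere):
import time

time_start = time.time()
window_size = 60 * 10                 # length of the highlight window, seconds
baseline_window_multiplier = 4        # baseline spans several windows
starts_with = None                    # optional chart-name prefix filter
host = '127.0.0.1:19999'              # host to pull charts from
model = 'ks'                          # placeholder model identifier
highlight_before = int(time.time())
highlight_after = highlight_before - window_size
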
baseline_before = highlight_after - 1
baseline_after = baseline_before - (window_size * baseline_window_multiplier)

# get charts
charts = get_chart_list(starts_with=starts_with, host=host)

# get data
colnames, arr_baseline, arr_highlight = get_data(host, charts, baseline_after,
                                                 baseline_before,
                                                 highlight_after,
                                                 highlight_before)
time_got_data = time.time()
print(f'... time start to data = {time_got_data - time_start}')

# get scores
results = run_model(model, charts, colnames, arr_baseline, arr_highlight)
time_got_scores = time.time()
print(f'... time data to scores = {round(time_got_scores - time_got_data, 2)}')

# df_results_chart
df_results_chart = results_to_df(results, model)
time_got_results = time.time()
print(
    f'... time scores to results = {round(time_got_results - time_got_scores, 2)}'
)

time_done = time.time()
print(f'... time total = {round(time_done - time_start,2)}')

print(df_results_chart)
Example #27
import nltk


# ### Process Data ###

# query_news_from_nyt()
# interpolate_df = query_stock_price()
# merging_data(interpolate_df)

# ### IF USING NLTK FOR THE FIRST TIME ###
# nltk.download()

# ### Build Model ###

print('### Build Model ###')
df, df_stocks = processWithData()
df, df_stocks = sentimentIntensity(df, df_stocks)
print('### read data and sentiment intensity success:', df.head())

datasetNorm = normalize_data(df)
print('### normalize data success:', datasetNorm.head())

hp = generate_hyperparameters(len(datasetNorm.index))
xTrain, yTrain, xTest, yTest = split_train_and_test_set(datasetNorm, hp)
print('### split train and test data success:')

# ### Run model ###

run_model(
    {'xTrain': xTrain, 'yTrain': yTrain, 'xTest': xTest, 'yTest': yTest}, hp)
Example #28
def main():
    args = _parse_args()
    data_dir = args.data_dir
    res_file = args.res_file
    run_feature(data_dir)
    run_model(res_file)
Example #29
import argparse
from model import run_model
import scraper

parser = argparse.ArgumentParser()
parser.add_argument('--lang',
                    nargs='*',
                    type=str,
                    default=["en", "es", "de", "zh-CN"])
parser.add_argument('environment',
                    help='The environment the tests will run on')
args = parser.parse_args()

scraper.sample(args.environment, args.lang)
run_model('2019 hackathon example.csv', '.')
Example #30
def runme():
    if len(sys.argv) > 1 and sys.argv[1] == "fill":
        run_model()
        run_test()
    port = int(os.environ.get('PORT', 5000))
    app.run(debug=True, port=port, host='0.0.0.0')
Example #31
inds = [i for i in range(len(df['losses'])) if loss_name in df['losses'][i]]
best_ind = inds[np.argmin(df['val_loss'][inds])]
print(df.iloc[best_ind])
pdb.set_trace()


# >> x_train, x_test (mock_data)
training_size, test_size, input_dim = 1039, 116, 8896
noise_level = 0.5
center_factor, h_factor = 5., 0.2
    
x, x_train, y_train, x_test, y_test = \
    ml.signal_data(training_size=training_size,
                   test_size=test_size,
                   input_dim=input_dim,
                   noise_level=noise_level,
                   center_factor=center_factor,
                   h_factor=h_factor)
num_classes, orbit_gap_start, orbit_gap_end = False, False, False
ticid_train, ticid_test = False, False
rms_train, rms_test = False, False

# >> run model
history, model, x_predict = ml.run_model(x_train, y_train, x_test, y_test, p,
                                         supervised=False)
ml.diagnostic_plots(history, model, p, output_dir, '', x, x_train, x_test,
                    x_predict, mock_data=True)