    def get_test_predictions(self, test_filename, save_filename):
        test_dataset = DialogueDataset(
            test_filename,
            self.config.sentence_len,
            self.vocab,
            False)

        test_data_loader = torch.utils.data.DataLoader(
            test_dataset, self.config.val_batch_size, shuffle=False)  # no need to shuffle at test time

        with open(test_filename, 'r') as f:
            data = json.load(f)

        start = time.perf_counter()
        phase_metrics = dict()
        epoch_loss = list()
        epoch_metrics = list()
        results = {"accuracy": list(), "precision": list(), "recall": list(),
                   "F1": list()}
        average_epoch_loss = None
        for i, batch in enumerate(tqdm(test_data_loader,
                                       mininterval=2, desc='test', leave=False)):
            # prepare data
            src_seq, src_pos, src_seg, tgt = map(
                lambda x: x.to(self.device), batch[:4])

            ids = batch[4]
            start_end_idx = batch[5]

            # forward
            pred = self.model(src_seq, src_pos, src_seg, tgt)

            loss = F.cross_entropy(self.prepare_pred(pred).view(-1, 2),
                                   tgt.view(-1))

            average_loss = float(loss)
            epoch_loss.append(average_loss)
            average_epoch_loss = np.mean(epoch_loss)

            output = torch.argmax(self.prepare_pred(pred), 3)

            record_predictions(output, data, ids, start_end_idx)

            get_results(tgt.view(-1).cpu(), output.view(-1).cpu(), results)

        phase_metrics["avg_results"] = {key: np.mean(value) for key, value in
                                        results.items()}
        phase_metrics["loss"] = average_epoch_loss

        phase_metrics["time_taken"] = time.clock() - start
        string = ' {} loss: {:.3f} '.format('test', average_epoch_loss)
        print(string, end='\n')

        data["results"] = phase_metrics

        with open(save_filename, 'w') as f:
            json.dump(data, f)

        return phase_metrics

    def execute_phase(self, epoch, phase):
        if phase == "train":
            self.model.train()
            dataloader = self.data_loader_train
            batch_size = self.config.train_batch_size
            train = True
        else:
            self.model.eval()
            dataloader = self.data_loader_val
            batch_size = self.config.val_batch_size
            train = False

        start = time.perf_counter()
        phase_metrics = dict()
        epoch_loss = list()
        epoch_metrics = list()
        results = {"accuracy": list(), "precision": list(), "recall": list(), "F1": list()}

        average_epoch_loss = None
        for i, batch in enumerate(tqdm(dataloader,
                                       mininterval=2, desc=phase, leave=False)):
            # prepare data
            src_seq, src_pos, src_seg, tgt = map(
                lambda x: x.to(self.device), batch[:4])

            ids = batch[4]
            start_end_idx = batch[5]

            # forward
            if train:
                self.optimizer.zero_grad()
            pred = self.model(src_seq, src_pos, src_seg, tgt)

            loss = F.cross_entropy(self.prepare_pred(pred).view(-1, 2), tgt.view(-1))

            average_loss = float(loss)
            epoch_loss.append(average_loss)
            average_epoch_loss = np.mean(epoch_loss)

            if train:
                self.writer.add_scalar("train_loss",
                    average_loss, global_step=i + epoch * self.config.train_batch_size)
                # backward
                loss.backward()

                # update parameters
                self.optimizer.step_and_update_lr()
            output = torch.argmax(self.prepare_pred(pred), 3)
            get_results(tgt.view(-1).cpu(), output.view(-1).cpu(), results)

        phase_metrics["avg_results"] = {key: np.mean(value) for key, value in results.items()}
        phase_metrics["loss"] = average_epoch_loss

        phase_metrics["time_taken"] = time.clock() - start
        string = ' {} loss: {:.3f} '.format(phase, average_epoch_loss)
        print(string, end='\n')
        return phase_metrics
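
For context, a minimal sketch of how these two methods are typically driven; the trainer instance, the `epochs` field on its config, and the file names below are assumptions, not part of the excerpt:

# Hypothetical driver; `trainer` is an instance of the class excerpted above.
for epoch in range(trainer.config.epochs):
    train_metrics = trainer.execute_phase(epoch, "train")
    val_metrics = trainer.execute_phase(epoch, "val")
test_metrics = trainer.get_test_predictions("test.json", "predictions.json")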
Example #3
def addTracks(artist, tracks, artistNet):
    for track in tracks:
        # get list of users who have favorited this user's track
        favoriters = get_results(client, '/tracks/' + str(track.id) + '/favoriters')
        for user in favoriters:
            addAction('favorites', user, artist, addWeight('favorites', user, artist, artistNet, 'fav_weight'))

        # get list of users who have commented on this user's track
        commenters = get_results(client, '/tracks/' + str(track.id) + '/comments')
        for comment in commenters:
            addAction('comments', comment, artist, addWeight('comments', comment, artist, artistNet, 'com_weight'))
Example #4
def addTracks(profile, tracks): #, profileGraph):
    for track in tracks:
        # get list of users who have favorited this user's track
        favoriters = get_results(client, '/tracks/' + str(track.id) + '/favoriters')
        for favoriter in favoriters:
            addFav(favoriter.user, profile, track.id)
#           addAction('favorites', user, artist, addWeight('favorites', user, artist, profileGraph, 'fav_weight'))

        # get list of users who have commented on this user's track
        commenters = get_results(client, '/tracks/' + str(track.id) + '/comments')
        for commenter in commenters:
            addComment(commenter.user, profile, commenter.id)
Example #5
def ga(conf, sample, start, end, step, typ, relations, basefile, metric):
    """
    Genetic algorithm for black box optimization
    :param conf: list of parameters to tune
    :param sample: a sample configuration
    :param start: the lower limit for values of each parameter (a dictionary)
    :param end: the upper limit for values of each parameter (a dictionary)
    :param step: the step size for values of each parameter (a dictionary)
    :param typ: type of each parameter (int, boolean, etc.; not currently used)
    :param relations: the relationships that must be maintained between the tuned parameters and other parameters to keep the configuration valid
    :param basefile: base file containing a sample of a full topology configuration
    :param metric: comma-separated metrics
    """
    # Initializations
    p = []
    total_runs = 50
    m = 12  # initial sample size
    mu_rate = 0.1  # mutation rate
    rand_select = 0.1  # random parent selection rate
    top = 0.2  # percentage of top parents selected

    design_space = list()
    metric_values = list()
    numbers = list()
    result = dict(sample)

    # Generate the first n samples using LHS
    design_space = utils.generate_LHS(result, start, end, step, typ, relations,
                                      p, conf, m)
    # Get results and get the best configuration
    metric_values, numbers = utils.get_results(0, m, design_space, basefile,
                                               metric)

    while len(design_space) <= total_runs:
        # Get two parents
        parents = get_parents(design_space, top, rand_select, metric)
        # Do a cross over between parents
        children = do_crossover(parents)
        # Perform mutations with a probability of 0.1
        mu_children = do_mutations(children, mu_rate, design_space, start, end,
                                   step, typ, relations, conf)

        # If the child has been evaluated before skip it
        if mu_children in design_space:
            continue
        n = len(design_space)
        design_space.extend(mu_children)
        # Get the results from running the application and based on the utility function
        metric_values, numbers = utils.get_results(n, n + len(mu_children),
                                                   design_space, basefile,
                                                   metric)
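
The docstring above is easiest to read next to a concrete call. Here is a hedged sketch of an invocation; every parameter name, range, and file name below is invented for illustration:

# Hypothetical invocation; dictionaries and files are illustrative only.
conf = ['executors', 'parallelism']
sample = {'executors': 4, 'parallelism': 8}
start = {'executors': 1, 'parallelism': 1}
end = {'executors': 16, 'parallelism': 64}
step = {'executors': 1, 'parallelism': 1}
typ = {'executors': int, 'parallelism': int}  # accepted but not used yet
ga(conf, sample, start, end, step, typ, relations={},
   basefile='topology.yaml', metric='latency,throughput')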
Example #6
def get_chosen_price_results():
    user_query = request.form.get('user_query')
    price = request.form.get('median_price')
    higher_lower = request.form.get('price_choice')
    if higher_lower == 'higher':
        results = get_results(es_connection, user_query, min_price=price)
    else:
        results = get_results(es_connection, user_query, max_price=price)

    if results:
        brand_results = get_top_k_brands(results, top_k=5)
        return jsonify(status="OK", products=results, brands=brand_results)
    else:
        return jsonify(status="FAILED",
                       error=404,
                       message="No results found.")
Example #7
def addTracks(profile, tracks): #, profileGraph):
    for track in tracks:
        # get list of users who have favorited this user's track
        try:
            favoriters = get_results(client, '/tracks/' + str(track.id) + '/favoriters')
            for favoriter in favoriters:
                addTrackFav(favoriter, profile) #, track.id)
#               addAction('favorites', user, artist, addWeight('favorites', user, artist, profileGraph, 'fav_weight'))

            # get list of users who have commented on this user's track
            commenters = get_results(client, '/tracks/' + str(track.id) + '/comments')
            for commenter in commenters:
                addTrackComm(commenter, profile) #, commenter.id)
#               addAction('comments', user, artist, addWeight('comments', comment, artist, profileGraph, 'com_weight'))
        except HTTPError:
            print("Track not processed due to HTTP error.")
Example #8
def job(query: str):
    message = None
    if not query.startswith('test'):
        job_id = utils.get_job_from_string(query)
    else:
        job_id = query

    job = utils.get_job(job_id)
    if not job:
        message = f'There is no job for word: "{query}"'
    else:
        if job.get_error_path():
            message = f'Error in job for word: "{query}"'
    if message:
        flash(message, 'errors')
        return redirect(url_for('main'))
    try:
        job_results = utils.apply_window(utils.get_results(job))
    except FileNotFoundError:
        return render_template("job.html",
                               meta=job.get_meta(),
                               inprogress=True,
                               data=[],
                               labels=[])

    return render_template("job.html",
                           meta=job.get_meta(),
                           data=job_results.data,
                           labels=list(map(str, job_results.labels)))
Example #9
    def update_graph(*args):
        data = data_source()
        filter_ids, date_col, feature = get_filters(data)[4:-1]
        type_converter = lambda col: "'" if isinstance(list(data[col])[0], str) else " "
        # accumulate one query condition per non-'ALL' filter
        query_str = ""
        for f in range(len(filter_ids)):
            filter_id = filter_ids[f]
            conv = type_converter(filter_id)
            if args[f] != 'ALL':
                is_started = " " if query_str == "" else " and "
                query_str += is_started + " " + filter_id + " == " + conv + str(args[f]) + conv

        result_data = get_results(date_col)
        dff = result_data if query_str == "" else result_data.query(query_str)
        if len(dff) == 0:
            return {"data": [], "layout": go.Layout(height=600, title="Time Line Of " + feature)}
        else:
            trace = [go.Scatter(x=dff[date_col],
                                y=dff['predict'],
                                mode='markers+lines',
                                customdata=dff[date_col],
                                name='prediction'),
                    go.Bar(x=dff[date_col], y=dff['predicted_label'])]
            return {"data": trace,
                    "layout": go.Layout(height=600, title="Anomaly Detection and Prediction " + feature)}
Example #10
def main():
    cat_fn = "/scratch/ksf293/kavli/anomaly/data/hsc_catalogs/pdr2_wide_icmod_20.0-20.5_clean_more.csv"
    #cat_fn = "../data/hsc_catalogs/pdr2_wide_icmod_20.0-20.5.csv"
    cat = pd.read_csv(cat_fn)
    tag = 'gri_100k'
    results_dir = '/scratch/ksf293/kavli/anomaly/results'
    results_fn = f'{results_dir}/results_{tag}.h5'
    imarr_fn = f'/scratch/ksf293/kavli/anomaly/data/images_h5/images_{tag}.h5'

    #savetag = '_log'
    savetag = ''

    sigma = 0
    reals, recons, gen_scores, disc_scores, scores, idxs, object_ids = utils.get_results(
        results_fn, imarr_fn, sigma=sigma)

    flag = 'cmodel_ellipse_radius'
    plot_vs_scores(idxs, scores, flag, cat, tag, savetag, band='i')

    flags = [
        'pixelflags_interpolated', 'pixelflags_saturated',
        'pixelflags_clipped', 'pixelflags_rejected', 'pixelflags_inexact_psf',
        'pixelflags_cr', 'pixelflags_clippedcenter',
        'pixelflags_rejectedcenter', 'pixelflags_inexact_psfcenter'
    ]
Example #11
def run_fourier_sarsa_experiments(transfer_episodes, transfer_epsilon, params):
    statistics = {"errors": [], "stopping_points": [], "utilities": []}

    filename = RESULTS_DIRECTORY + "fourier-sarsa-transfer-[{}]-[{}]-[{}].json".format(
        params["alpha"], params["epsilon"], params["order"])

    print("Training on {} with [increment = {}]".format(
        PROBLEM_DIRECTORY + PROBLEM_FILES[0][0], PROBLEM_FILES[0][1]))
    metareasoning_env = env.Environment(
        PROBLEM_DIRECTORY + PROBLEM_FILES[0][0], ALPHA, BETA,
        PROBLEM_FILES[0][1])
    prakhar = fourier_agent.Agent(metareasoning_env, params)
    prakhar.run_sarsa(statistics)

    for problem_file in PROBLEM_FILES[1:]:
        problem_file_path = PROBLEM_DIRECTORY + problem_file[0]
        increment = problem_file[1]

        params["episodes"] = transfer_episodes
        params["epsilon"] = transfer_epsilon

        print("Shifting to {} with [increment = {}]".format(
            problem_file_path, increment))
        metareasoning_env = env.Environment(problem_file_path, ALPHA, BETA,
                                            increment)
        prakhar = fourier_agent.Agent(
            metareasoning_env, params, prakhar.function_approximator.weights,
            prakhar.function_approximator.action_value_function)
        prakhar.run_sarsa(statistics)

    utils.save(filename, statistics)

    return utils.get_results(statistics["errors"], WINDOW_SIZE,
                             PLOT_WINDOW_SIZE)
Example #12
def predict(bot, update):
    chat_id = update.message.chat_id
    msg = ''

    user_rephrase_times[chat_id][0] = 0

    if update.message.text.lower() not in non_fin_words and is_finance(
            clf, vect, update.message.text):
        user_contexts[chat_id] += " " + clean_text(update.message.text)
        results = get_results(clf, vect, user_contexts[chat_id])
        user_themes[chat_id] = [i[0] for i in results]

        if results[0][1] / results[1][1] > 3:
            # "You are interested in the topic <label>. Correct?"
            msg += "Вас интересует тема \"" + labels[int(
                results[0][0])] + "\". Да?"
            custom_keyboard = [["Да"], ["Нет"]]  # "Yes" / "No"
            mode = "SINGLE"
        else:
            # "Please clarify which topic interests you (enter a number or pick from the list)"
            msg += "Пожалуйста, уточните, какая тема вас интересует (введите номер или выберите в списке)"
            custom_keyboard = [[
                str(i + 1) + '. ' + labels[int(results[i][0])]
            ] for i in range(4)]
            custom_keyboard.append(["0. Никакая из этих тем не подходит"])  # "None of these topics fits"
            mode = "MULTIPLE"

        user_check_times[chat_id] = [time.time(), False, mode]
        user_states[chat_id] = 'CHECK'

        reply_markup = telegram.ReplyKeyboardMarkup(custom_keyboard)
        bot.sendMessage(chat_id=chat_id, text=msg, reply_markup=reply_markup)
    else:
        # "This doesn't look like a financial text"
        msg += "Не похоже на финансовый текст"
        msg += cake
        bot.sendMessage(chat_id=update.message.chat_id, text=msg)
Example #13
    def run_experiment(self):
        if self.onEuler:
            self.run_euler_experiment(self.nethz_username)
            if self.nethz_username == 'xxx':
                self.process_euler_experiment()
            return

        self.re_init_data()
        files = sorted([
            os.path.join(CNF_FOLDER, f) for f in os.listdir(CNF_FOLDER) if
            os.path.isfile(os.path.join(CNF_FOLDER, f)) and f.endswith('.cnf')
        ])
        for s in EXECUTABLES:
            exe = os.path.join(parent_parent, "./{}_main".format(s))
            delete_files_in_folder(CNF_FOLDER, 'time')
            self.data[s] = {}
            for f in files:
                print('main: {}  file: {}'.format(s, f))
                self.data[s][f] = {}
                self.data[s][f]['info'] = get_info(f).__dict__
                command = 'gtimeout {} mpirun -np 4 {} {}'.format(
                    TIMEOUT, exe, f)
                run_n_times(command, REPETITIONS)
                timing_file = f[:-3] + 'time'
                times = get_results(timing_file)
                self.data[s][f]['time'] = times
Example #14
def post_restart_phase(values, fx_new, x_new, fx0, x0, length, design_space,
                       basefile, metric, metric_values, numbers, total_runs):
    if fx_new < numpy.percentile(values, 80):
        if fx_new < fx0:
            print "Potential best result"
            design_space.append(x_new)
            value, number = utils.get_results(length, length + 1, design_space,
                                              basefile, metric)
            metric_values.extend(value)
            numbers.extend(number)
            f_temp = min(list(value))
            #f_temp = min(metric_values)
            #x_temp = design_space[numbers[metric_values.index(f_temp)]]
            x_temp = dict(x_new)
            length = len(design_space)

            if f_temp < fx0:
                if fx_new < f_temp:
                    x0 = dict(x_temp)
                    fx0 = f_temp
                else:
                    x0 = dict(x_new)
                    fx0 = fx_new
                print "Best result: " + str(fx0)
        if len(values) >= total_runs:
            return "terminate", design_space, metric_values, numbers, fx0, x0, length
        else:
            return "localsearch", design_space, metric_values, numbers, fx0, x0, length
    else:
        if len(values) >= total_runs:
            return "terminate", design_space, metric_values, numbers, fx0, x0, length
        else:
            return "restart", design_space, metric_values, numbers, fx0, x0, length
Example #15
def restart_phase(design_space, result, start, end, step, typ, relations, p,
                  conf, l, length, basefile, metric):
    design_space.extend(
        generate_LHS(result, start, end, step, typ, relations, p, conf, l))
    # Get results and get the best configuration
    metric_values, numbers = utils.get_results(length, length + l,
                                               design_space, basefile, metric)
    return design_space, metric_values, numbers
Example #17
    def test_api_query(self):
        new_tag = Tag.objects.create(name='new-tag')
        Post.objects.create(title='Post with another tag', published=True).tags.set([new_tag])
        client = APIClient()
        response = client.get(reverse('post-list') + '?tag=tag2')
        self.assertEqual(response.status_code, 200)
        data = get_results(response.data)
        self.assertEqual(len(data), 1)
        self.assertEqual(data[0].get('id'), 1)
Example #18
    def test_api_tag_list(self):
        client = APIClient()
        response = client.get(reverse('tag-list'))
        self.assertEqual(response.status_code, 200)
        data = get_results(response.data)
        self.assertEqual(len(data), 3)
        self.assertEqual(data[0], {'id': 1, 'name': 'tag1', 'count': 1})
        self.assertEqual(data[1], {'id': 2, 'name': 'tag2', 'count': 1})
        self.assertEqual(data[2], {'id': 3, 'name': 'tag3', 'count': 0})
Example #19
    def test_posts_list_user_unpublished(self):
        client = APIClient()
        client.login(username='******', password='******')
        response = client.get(self.list_url)
        self.assertEqual(response.status_code, 200)
        data = get_results(response.data)
        self.assertEqual(len(data), 4)
        self.assertEqual(data[3]['id'], 4)
        self.assertEqual(data[3]['published'], False)
        self.assertEqual(data[3]['author']['id'], 2)
Example #20
def gen_search_json():
    start_time = time.time()
    query = request.args.get("q", '')
    query = utils.process_term(query)
    results = utils.get_results(query.strip())
    resp = jsonify(results=results[:10])  # top 10 results
    resp.headers['Access-Control-Allow-Origin'] = '*'
    end_time = time.time()
    #print("Response time : " + str(end_time - start_time))
    return resp
Example #21
def get_initial_query_results():
    user_query = request.form.get('user_query')
    results = get_results(es_connection, user_query)
    if results:
        median_price = get_price_info(results)
        return jsonify(status="OK", products=results, median_price=median_price)
    else:
        return jsonify(status="FAILED",
                       error=404,
                       message="No results found.")
Example #22
    def test_api_category_list(self):
        client = APIClient()
        response = client.get(reverse('category-list'))
        self.assertEqual(response.status_code, 200)
        data = get_results(response.data)
        self.assertEqual(len(data), 5)
        self.assertEqual(data[0], {'id': 1, 'name': 'root1', 'count': 0, 'parent': None})
        self.assertEqual(data[1], {'id': 2, 'name': 'root2', 'count': 0, 'parent': None})
        self.assertEqual(data[2], {'id': 3, 'name': 'root1-node1', 'count': 0, 'parent': 1})
        self.assertEqual(data[3], {'id': 4, 'name': 'root1-node2', 'count': 1, 'parent': 1})
        self.assertEqual(data[4], {'id': 5, 'name': 'root1-node2-node1', 'count': 0, 'parent': 4})
Example #23
def results():
    # get query
    query = request.args['query']

    # get results
    response = get_results(query)
    recordings = response['recordings']
    recordings = sorted(recordings, key=lambda rec: rec['q'])

    # render response
    return render_template('results.html.j2', recordings=recordings, query=query)
Example #24
def get_query_results_for_sweater():
    # TODO: fill in - Siem.
    # If user indeed wants to search for sweaters, give sweater results.

    results = get_results(es_connection, query="jacket", filter_category="Jackets & Coats")

    if results:
        return jsonify(status="OK", products=results)
    else:
        return jsonify(status="FAILED",
                       error=404,
                       message="No results found.")
Example #25
def new_potentialbest(fx0, x0, design_space, length, basefile, metric,
                      metric_values, numbers, neighborhood_size, alpha_passed,
                      conf, n_start, n_end, step, typ, relations):
    x_new = get_pbest(design_space, numbers, conf, metric_values, n_start,
                      n_end, step, typ, relations)
    design_space.append(x_new)
    val, num = utils.get_results(length, length + 1, design_space, basefile,
                                 metric)
    metric_values.extend(val)
    numbers.extend(num)
    fx_new = min(val)  # val holds the metric for the single new configuration
    length = len(design_space)
    if fx_new < fx0:
        print "Potential best result"
        design_space.append(x_new)
        value, number = utils.get_results(length, length + 1, design_space,
                                          basefile, metric)
        metric_values.extend(value)
        numbers.extend(number)
        f_temp = min(list(value))
        x_temp = dict(x_new)
        #f_temp = min(metric_values)
        #x_temp = design_space[numbers[metric_values.index(f_temp)]]
        length = len(design_space)

        if f_temp < fx0:
            print "New Best configuration found"
            if fx_new < f_temp:
                x0 = dict(x_temp)
                fx0 = f_temp
            else:
                x0 = dict(x_new)
                fx0 = fx_new
            alpha_passed = 1
            neighborhood_size *= alpha_passed
            print "Best result: " + str(fx0)
            return True, x0, fx0, alpha_passed, neighborhood_size, length

    return False, x0, fx0, alpha_passed, neighborhood_size, length
Example #26
    def test_posts_list_anonymous_user(self):
        client = APIClient()
        response = client.get(self.list_url)
        self.assertEqual(response.status_code, 200)
        data = get_results(response.data)
        self.assertEqual(len(data), 2)
        self.assertEqual(data[0]['id'], 1)
        self.assertEqual(data[1]['id'], 2)

        self.assertEqual(data[0]['title'], "Post #1")
        # now post-list does not return content
        # self.assertEqual(data[0]['content'], "Post #1 content")
        self.assertEqual(data[0]['published'], True)
        self.assertIsNotNone(data[1]['publish_time'])
Example #27
def run_fourier_q_learning_experiments(params):
    statistics = {"errors": [], "stopping_points": [], "utilities": []}

    filename = RESULTS_DIRECTORY + "fourier-q-[{}]-[{}]-[{}]-{}".format(
        params["alpha"], params["epsilon"], params["order"], PROBLEM_FILE)

    metareasoning_env = env.Environment(PROBLEM_FILE_PATH, ALPHA, BETA,
                                        INCREMENT)
    prakhar = fourier_agent.Agent(params, metareasoning_env)
    prakhar.run_q_learning(statistics)

    utils.save(filename, statistics)

    return utils.get_results(statistics["errors"], WINDOW_SIZE,
                             PLOT_WINDOW_SIZE)
Example #28
    def test_post_filter(self):
        client = APIClient()
        response = client.get(self.list_url + '?title=Post%20#1')
        self.assertEqual(response.status_code, 200)
        data = get_results(response.data)
        self.assertEqual(len(data), 1)
        self.assertEqual(data[0].get('id'), 1)

        response = client.get(self.list_url + '?content_contains=#2')
        self.assertEqual(response.status_code, 200)
        data = get_results(response.data)
        self.assertEqual(len(data), 1)
        self.assertEqual(data[0].get('id'), 2)

        response = client.get(self.list_url + '?author=user1')
        self.assertEqual(response.status_code, 200)
        data = get_results(response.data)
        self.assertEqual(len(data), 2)
        self.assertEqual(data[0].get('id'), 1)
        self.assertEqual(data[1].get('id'), 2)

        response = client.get(self.list_url + f'?publish_time_gte={self.time_now.date().isoformat()}')
        self.assertEqual(response.status_code, 200)
        data = get_results(response.data)
        self.assertEqual(len(data), 1)
        self.assertEqual(data[0].get('id'), 2)

        response = client.get(self.list_url + '?view_count_lte=5')
        self.assertEqual(response.status_code, 200)
        data = get_results(response.data)
        self.assertEqual(len(data), 1)
        self.assertEqual(data[0].get('id'), 1)

        # accepts pagination params
        response = client.get(self.list_url + '?limit=5&offset=1')
        self.assertEqual(response.status_code, 200)
Example #29
def get_query_results_for_brand():
    """
    :return: new results based on query with user_query and brand_name filter
    as well as price choice from previous prompt.
    """
    user_query = request.form.get('user_query')
    brand = request.form.get('brand_choice')
    price = request.form.get('median_price')
    higher_lower = request.form.get('price_choice')
    if higher_lower == 'higher':
        results = get_results(es_connection, user_query, min_price=price, filter_brand=brand)
    else:
        results = get_results(es_connection, user_query, max_price=price, filter_brand=brand)

    lat = request.form.get('latitude')
    lon = request.form.get('longitude')

    if results:
        temperature = get_weather_for_latlon(lat, lon)
        return jsonify(status="OK", products=results, temperature=temperature)
    else:
        return jsonify(status="FAILED",
                       error=404,
                       message="No results found.")
Example #30
    def test_api_post(self):
        client = APIClient()

        client.login(username='******', password='******')
        response = client.post(reverse('post-list'), format='json',
                               data={'title': 'New Post', 'content': 'To test adding tags and categories for contents.',
                                     'tags': ['tag2', 'new_tag'], 'category': 'root1-node2-node1'})
        self.assertEqual(response.status_code, 201)
        data = get_results(response.data)

        self.assertTrue(Tag.objects.filter(name='new_tag').exists())

        self.assertEqual(data['category'], 'root1-node2-node1')
        self.assertEqual(data['tags'][0], {'id': 2, 'name': 'tag2', 'count': 2})
        self.assertEqual(data['tags'][1], {'id': 4, 'name': 'new_tag', 'count': 1})
Example #31
def gen_search_json():
    start_time = time.time()
    query = request.args.get("q", '')
    query = utils.process_term(query)
    results = utils.get_results(query.strip())
    resp = jsonify(results=results[:])  # all results
    print("########################################")
    print("IF THERE IS MORE THAN ZERO BYTES HERE, THE SERVER IS SENDING RESULTS TO THE FRONTEND")
    print(resp)
    print("########################################")
    resp.headers['Access-Control-Allow-Origin'] = '*'
    # end_time = time.time()
    # print("Response time : " + str(end_time - start_time))

    return resp
Example #32
    def process_euler_experiment(self):
        self.re_init_data()
        files = sorted([
            os.path.join(CNF_FOLDER, f) for f in os.listdir(CNF_FOLDER) if
            os.path.isfile(os.path.join(CNF_FOLDER, f)) and f.endswith('.cnf')
        ])
        for s in EXECUTABLES:
            self.data[s] = {}
            for f in files:
                time_file = f.replace('.cnf', '_{}.time'.format(s))
                print('main: {}  file: {}'.format(s, time_file))

                self.data[s][f] = {}
                self.data[s][f]['info'] = get_info(f).__dict__
                times = get_results(time_file)
                self.data[s][f]['time'] = times
Example #33
def build_per_batch_size(snmp_version):
    results = {}
    base_options = {
        'round': 10,
        'session': 10,
        'snmp_version': snmp_version,
    }
    for i in [5, 10, 15, 20, 50, 100]:
        kwargs = base_options.copy()
        kwargs.update({
            'oid_batch_size': i,
        })
        print("Run for params: {}".format(kwargs))
        results[i] = get_results(**kwargs)

    create_all_graphs(results, per_value='batch_size', desc="params: {}".format(base_options))
Example #34
import soundcloud
from utils import get_results

# create a client object with your app credentials
client = soundcloud.Client(client_id='454aeaee30d3533d6d8f448556b50f23')

i = 0
for track in get_results(client, '/tracks'):
    print(i, track.title)
    i += 1
    if i == 100:
        break

i = 0
for user in get_results(client, '/users'):
    print(i, user.first_name, user.last_name, user.id)
    i += 1
    if i == 100:
        break
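
Most of the SoundCloud examples lean on a shared get_results(client, path) helper from utils, whose body is not shown here. A minimal sketch of what such a helper could look like, assuming offset pagination through the old soundcloud SDK's client.get (the page size and stopping rule are assumptions):

def get_results(client, path, page_size=200):
    # Hypothetical reconstruction: page through a SoundCloud collection
    # using limit/offset until an empty page comes back.
    results = []
    offset = 0
    while True:
        page = client.get(path, limit=page_size, offset=offset)
        if not page:
            break
        results.extend(page)
        offset += page_size
    return results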
Example #35
    def get_queryset(self):
        results = get_results(self.request.GET.get('query'))
        return results
Example #36
def getTracks(profile):
    tracks = get_results(client, '/users/{0:s}/tracks/'.format(str(profile)))
    return tracks
Example #37
def getComments(profile):
    comments = get_results(client, '/users/{0:s}/comments/'.format(str(profile)))
    return comments
Example #38
def getRelationships(profile, client, url):
    return get_results(client, url)
Example #39
def getFollowings(profile):
    # get list of users who the artist is following.
    followings = get_results(client, '/users/{0:s}/followings/'.format(str(profile)))
    return followings
Example #40
def getFollowers(profile):
    followers = get_results(client, '/users/{0:s}/followers/'.format(str(profile)))
    return followers
Example #41
def getFavorites(profile):
    favorites = get_results(client, '/users/{0:s}/favorites/'.format(str(profile)))
    return favorites