Example #1
def gen_pos():

    progress = 0.0

    cropped_images = []

    print("Cropping Images")

    for data_point in positive_data:
        img = cv2.imread("../lara_data/images/" + data_point[0])
        height, width = img.shape[:2]
        # Centre a max_window_size crop on the annotated bounding box,
        # shifting it back inside the frame if it overflows an edge.
        up_limit = (int(data_point[2]) + int(data_point[4])) // 2 - max_window_size[1] // 2
        down_limit = (int(data_point[2]) + int(data_point[4])) // 2 + max_window_size[1] // 2
        if up_limit < 0:
            down_limit -= up_limit
            up_limit = 0
        if down_limit > height:
            up_limit += (down_limit - height)
            down_limit = height
        left_limit = (int(data_point[1]) + int(data_point[3])) // 2 - max_window_size[0] // 2
        right_limit = (int(data_point[1]) + int(data_point[3])) // 2 + max_window_size[0] // 2
        if left_limit < 0:
            right_limit -= left_limit
            left_limit = 0
        if right_limit > width:
            left_limit += (right_limit - width)
            right_limit = width
        cropped_img = img[up_limit:down_limit, left_limit:right_limit]
        h, w = cropped_img.shape[:2]
        # Keep only crops that exactly match the window size.
        if int(w) == int(max_window_size[0]) and int(h) == int(max_window_size[1]):
            cropped_images.append(cropped_img)
        progress += 1.0
        update_progress(progress / float(len(positive_data)))

    print("Generating Positive Images")

    progress = 0.0

    i = 0
    for cropped_image in cropped_images:
        # out_image = cv2.cvtColor(cropped_image, cv2.COLOR_RGB2YCR_CB)
        # out_image = cv2.split(out_image)[0]
        out_image = cropped_image
        image_name = "pos" + str(i) + ".ppm"
        image_path = os.path.join(pos_img_path, image_name)
        cv2.imwrite(image_path, out_image)
        progress += 1.0
        i += 1
        update_progress(progress / float(len(cropped_images)))
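The examples on this page only call update_progress; the helper itself is project-specific and never shown here. For reference, a minimal sketch of a fraction-based console version (an assumption, not the implementation any of these projects use, and several of them pass different arguments) might look like:

import sys

def update_progress(progress):
    # Hypothetical helper: draw a console progress bar for a completion
    # fraction between 0.0 and 1.0.
    bar_length = 40
    progress = min(max(float(progress), 0.0), 1.0)
    filled = int(bar_length * progress)
    bar = "#" * filled + "-" * (bar_length - filled)
    sys.stdout.write("\r[{}] {:.0f}%".format(bar, progress * 100))
    sys.stdout.flush()
    if progress >= 1.0:
        sys.stdout.write("\n")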
def get_dict_of_pure_nash(number_of_games, demand_factor, impressions,
                          dir_location):
    """
    This function calls the function that plots the best response graph 
    for games with 2 to 20 players. It returns a dict of pure nash.
    """
    dict_of_pure_nash = {}
    for i in range(2, 21):
        update_progress(i / 20)
        profile_data = produce_profile_data(number_of_games, dir_location, i)
        DG = produce_mean_best_response_graph(profile_data)
        dict_of_pure_nash[i] = get_pure_nash(DG)
    return dict_of_pure_nash
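A hypothetical call of the function above; the argument values are placeholders, and the helpers it relies on (produce_profile_data, produce_mean_best_response_graph, get_pure_nash) must come from the surrounding module:

# Hypothetical usage; argument values are illustrative placeholders.
nash_by_players = get_dict_of_pure_nash(number_of_games=100,
                                        demand_factor=0.5,
                                        impressions=2000,
                                        dir_location="results/")
for n_players, equilibria in nash_by_players.items():
    print(n_players, equilibria)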
def main(audit):
    log.info(
        log_title(message="Load to Target Step: AKA do the migration already"))
    log.info(
        log_title(
            message=
            f"Source: {db_config['source_schema']} Target: sirius.{db_config['target_schema']}"
        ))
    log.info(f"Working in environment: {os.environ.get('ENVIRONMENT')}")

    if environment != "preproduction":
        amend_dev_data(db_engine=target_db_engine)

    tables_dict = table_helpers.get_enabled_table_details()
    tables_list = table_helpers.get_table_list(tables_dict)

    if audit == "True":
        log.info(f"Running Pre-Audit - Table Copies")
        run_audit(target_db_engine, source_db_engine, "before", log,
                  tables_list)
        log.info(f"Finished Pre-Audit - Table Copies")

    for i, table in enumerate(tables_list):

        log.debug(f"This is table number {i + 1} of {len(tables_list)}")

        insert_data_into_target(
            db_config=db_config,
            source_db_engine=source_db_engine,
            target_db_engine=target_db_engine,
            table_name=table,
            table_details=tables_dict[table],
        )
        update_data_in_target(
            db_config=db_config,
            source_db_engine=source_db_engine,
            table=table,
            table_details=tables_dict[table],
        )

        completed_tables.append(table)

    if environment == "local":
        update_progress(module_name="load_to_sirius",
                        completed_items=completed_tables)

    if audit == "True":
        log.info(f"Running Post-Audit - Table Copies and Comparisons")
        run_audit(target_db_engine, source_db_engine, "after", log,
                  tables_list)
        log.info(f"Finished Post-Audit - Table Copies and Comparisons")
def main(clear, include_tests, chunk_size):

    log.info(log_title(message="Migration Step: Transform Casrec Data"))
    log.info(
        log_title(
            message=
            f"Source: {db_config['source_schema']} Target: {db_config['target_schema']}"
        ))
    log.info(
        log_title(
            message=
            f"Enabled entities: {', '.join(k for k, v in config.ENABLED_ENTITIES.items() if v is True)}"
        ))
    log.debug(f"Working in environment: {os.environ.get('ENVIRONMENT')}")
    version_details = helpers.get_json_version()
    log.info(
        f"Using JSON def version '{version_details['version_id']}' last updated {version_details['last_modified']}"
    )

    db_config["chunk_size"] = chunk_size if chunk_size else 10000
    log.info(f"Chunking data at {chunk_size} rows")
    print(f"allowed_entities: {allowed_entities}")

    if clear:
        clear_tables(db_config=db_config)

    clients.runner(target_db=target_db, db_config=db_config)
    cases.runner(target_db=target_db, db_config=db_config)
    bonds.runner(target_db=target_db, db_config=db_config)
    supervision_level.runner(target_db=target_db, db_config=db_config)
    deputies.runner(target_db=target_db, db_config=db_config)
    death.runner(target_db=target_db, db_config=db_config)
    events.runner(target_db=target_db, db_config=db_config)
    finance.runner(target_db=target_db, db_config=db_config)
    remarks.runner(target_db=target_db, db_config=db_config)
    reporting.runner(target_db=target_db, db_config=db_config)
    tasks.runner(target_db=target_db, db_config=db_config)
    teams.runner(target_db=target_db, db_config=db_config)
    visits.runner(target_db=target_db, db_config=db_config)
    warnings.runner(target_db=target_db, db_config=db_config)

    if include_tests:
        run_data_tests(verbosity_level="DEBUG")

    if environment == "local":
        update_progress(module_name="transform", completed_items=files_used)
        log.debug(f"Number of mapping docs used: {len(files_used)}")
Example #6
def gen_neg():
    progress = 0.0
    cropped_images = []
    for i in range(9000):
        # Pick a random frame and a random window centre well inside the image.
        frame_number = str(random.randint(0, 11178))
        frame = 'frame_' + '0' * (6 - len(frame_number)) + frame_number + '.jpg'
        img = cv2.imread("../lara_data/images/" + frame)
        height, width = img.shape[:2]
        x = random.randint(max_window_size[0], width - max_window_size[0])
        y = random.randint(max_window_size[1], height - max_window_size[1])
        up_limit = y - max_window_size[1] // 2
        down_limit = y + max_window_size[1] // 2
        left_limit = x - max_window_size[0] // 2
        right_limit = x + max_window_size[0] // 2
        cropped_img = img[up_limit:down_limit, left_limit:right_limit]
        h, w = cropped_img.shape[:2]
        # Keep only crops that exactly match the window size.
        if int(w) == int(max_window_size[0]) and int(h) == int(max_window_size[1]):
            cropped_images.append(cropped_img)
        progress += 1.0
        update_progress(progress / float(9000))

    print("Generating Negative Images")

    progress = 0.0

    i = 0
    for cropped_image in cropped_images:
        # out_image = cv2.cvtColor(cropped_image, cv2.COLOR_RGB2YCR_CB)
        # out_image = cv2.split(out_image)[0]
        out_image = cropped_image
        image_name = "neg" + str(i) + ".ppm"
        image_path = os.path.join(neg_img_path, image_name)
        cv2.imwrite(image_path, out_image)
        progress += 1.0
        i += 1
        update_progress(progress / float(len(cropped_images)))
Example #8
def update():
    global result
    update_progress(module_name="load_to_staging", completed_items=completed_tables)
    result = "update complete"
        lc_multipliers.append(y.lc_multiplier)
        classic_multipliers.append(y.classic_multiplier)

        y.set_lc_g_mean(lc_multipliers)
        y.set_classic_g_mean(classic_multipliers)

        lc_g_means.append(y.lc_g_mean)
        classic_g_means.append(y.classic_g_mean)

        age += 1

    total_lc_g_means += lc_g_means
    total_classic_g_means += classic_g_means

    update_progress('Running simulations', iteration / iterations)


update_progress('Simulations complete', 1)

mean_diffs = list(map(operator.sub, total_lc_g_means, total_classic_g_means))
negatives = find_negative_count(mean_diffs)
negative_probability = negatives / len(mean_diffs)

min_diff = min(mean_diffs)
max_diff = max(mean_diffs)

print('mean: ', mean(mean_diffs))
print('stdev: ', stdev(mean_diffs))
print('variance : ', variance(mean_diffs))
print('median: ', median(mean_diffs))
Example #10
def api(data: Dict) -> Dict[str, Any]:
    def check_param(default: Any, param: str):
        return default if param not in data else data[param]

    text = data['text']
    ip = data["ip"]
    query = check_param(None, "query")
    q_threshold = check_param(0.5, "query_threshold")
    # TEXT TRANSLATOR
    # translate_from = check_param(Language.TAGALOG.value, "translate_from")
    # translate_to = check_param(Language.ENGLISH.value, "translate_to")
    # text = TranslatorManager(text, translate_from = translate_from, translate_to=translate_to).translated_text

    # TEXT NORMALIZER
    token_count = 0
    raw_sents = list()
    sentences = list()
    start_time = time()

    partitions = NormalizerManager.partitioned_docs(text)
    if len(partitions) > 15:
        t_normalizer = NormalizerManager(partitions)
        raw_sents = t_normalizer.raw_sents
        sentences = t_normalizer.sentences
        token_count = len(t_normalizer.tokens)
    else:
        for i, partition in enumerate(partitions):
            sleep(1)
            update_progress(ip, round(30 / (len(partitions) - i), 2))
            tn = TextNormalizer(partition,
                                query=query,
                                query_similarity_threshold=q_threshold)
            raw_sents.extend(tn.raw_sents)
            sentences.extend(tn.sentences)
            token_count += len(tn.tokens)

    # TEXT SENTIMENT CLASSIFIER
    neu_threshold = check_param(0.1, "threshold_classifier")

    # TEXT TOPIC MODELLER
    visualize = check_param(False, "visualize")
    dashboard_style = check_param(True, "dashboard_style")

    # TEXT SUMMARIZER
    summary_length = data['summary_length']
    sort_by_score = check_param(False, "sort_by_score")

    options = {
        "raw_sents": raw_sents,
        "sents": sentences,
        "summary": summary_length,
        "sort_by_score": sort_by_score,
        "visualize": visualize,
        "query": query,
        "style": dashboard_style,
        "partitions": partitions,
        "neu_threshold": neu_threshold,
        "ip": ip,
        "q_t": q_threshold
    }

    samuel_data = dict()
    print("Preparing API Process Pool")
    update_progress(ip, round(30 + (30 / 4), 2))
    pool = Pool()
    update_progress(ip, round(30 + (30 / 2), 2))
    print("Mapping API Processes")
    result = pool.map_async(partial(api_processor, options=options),
                            list(range(3)))
    # Merge the partial results without shadowing the `data` parameter.
    for partial_result in result.get():
        samuel_data.update(partial_result)
    pool.close()
    pool.join()
    for i in range(10):
        update_progress(ip, round(45 + (50 / (10 - i)), 2))
        sleep(0.25)
    end_time = time()
    update_progress(ip, 100)
    sleep(0.5)
    print("API Pooling Done")
    print(
        f"Data processed in {round(end_time - start_time, 2)} secs. with over "
        f"{len(raw_sents)} sentences consisting of {token_count} tokens "
        "(excluding sentences and tokens below normalization threshold)")
    return samuel_data
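The required request shape can be read off the dictionary accesses above; a hypothetical call (values are placeholders, and the module's own dependencies must be importable) might be:

# Hypothetical request: only "text", "ip" and "summary_length" are read
# unconditionally; every other key falls back to its check_param default.
request = {
    "text": "Some long document to normalize, classify and summarize ...",
    "ip": "127.0.0.1",
    "summary_length": 5,
}
response = api(request)
print(sorted(response.keys()))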