def test_create_training_pipeline_custom_training_managed_dataset_sample(
        capsys, shared_state, pipeline_client):
    create_training_pipeline_custom_training_managed_dataset_sample.create_training_pipeline_custom_training_managed_dataset_sample(
        project=PROJECT_ID,
        display_name=DISPLAY_NAME,
        model_display_name=MODEL_DISPLAY_NAME,
        dataset_id=DATASET_ID,
        annotation_schema_uri=ANNOTATION_SCHEMA_URI,
        training_container_spec_image_uri=TRAINING_CONTAINER_SPEC_IMAGE_URI,
        model_container_spec_image_uri=MODEL_CONTAINER_SPEC_IMAGE_URI,
        base_output_uri_prefix=BASE_OUTPUT_URI_PREFIX,
    )

    out, _ = capsys.readouterr()

    # Save resource name of the newly created training pipeline
    shared_state["training_pipeline_name"] = helpers.get_name(out)

    # Poll until the pipeline succeeds because we want to test the model_upload step as well.
    helpers.wait_for_job_state(
        get_job_method=pipeline_client.get_training_pipeline,
        name=shared_state["training_pipeline_name"],
        expected_state="SUCCEEDED",
        timeout=1800,
        freq=20,
    )

    training_pipeline = pipeline_client.get_training_pipeline(
        name=shared_state["training_pipeline_name"])

    # Check that the model indeed has been uploaded.
    assert training_pipeline.model_to_upload.name != ""

    shared_state["model_name"] = training_pipeline.model_to_upload.name
Example #2
def test_ucaip_generated_create_dataset_video_sample_vision(
        capsys, shared_state):
    create_dataset_video_sample.create_dataset_video_sample(
        display_name=f"temp_create_dataset_test_{uuid4()}", project=PROJECT_ID)
    out, _ = capsys.readouterr()
    assert "create_dataset_response" in out

    shared_state["dataset_name"] = helpers.get_name(out)
Example #3
def test_ucaip_generated_create_endpoint_sample(capsys, shared_state):

    create_endpoint_sample.create_endpoint_sample(display_name=DISPLAY_NAME,
                                                  project=PROJECT)

    out, _ = capsys.readouterr()
    assert "create_endpoint_response" in out

    shared_state["endpoint_name"] = helpers.get_name(out)
Example #4
def test_ucaip_generated_create_custom_job(capsys, shared_state):
    create_custom_job_sample.create_custom_job_sample(
        display_name=f"temp_create_custom_job_test_{uuid.uuid4()}",
        container_image_uri=CONTAINER_IMAGE_URI,
        project=PROJECT_ID,
    )
    out, _ = capsys.readouterr()
    assert "response" in out

    shared_state["custom_job_name"] = helpers.get_name(out)
Example #5
def test_ucaip_generated_create_dataset_sample_vision(capsys, shared_state):
    create_dataset_sample.create_dataset_sample(
        display_name=f"temp_create_dataset_test_{uuid4()}",
        metadata_schema_uri=IMAGE_METADATA_SCHEMA_URI,
        project=PROJECT_ID,
    )
    out, _ = capsys.readouterr()
    assert "create_dataset_response" in out

    shared_state["dataset_name"] = helpers.get_name(out)
Example #6
def test_ucaip_generated_create_dataset_tabular_bigquery(capsys, shared_state):
    create_dataset_tabular_bigquery_sample.create_dataset_tabular_bigquery_sample(
        display_name=f"temp_create_dataset_test_{uuid4()}",
        bigquery_uri=BIGQUERY_URI,
        project=PROJECT_ID,
    )
    out, _ = capsys.readouterr()
    assert "create_dataset_response" in out

    shared_state["dataset_name"] = helpers.get_name(out)
Example #7
def test_ucaip_generated_upload_model_sample(capsys, shared_state):

    upload_model_sample.upload_model_sample(
        display_name=DISPLAY_NAME,
        metadata_schema_uri="",
        image_uri=IMAGE_URI,
        artifact_uri=ARTIFACT_URI,
        project=PROJECT_ID,
    )

    out, _ = capsys.readouterr()

    shared_state["model_name"] = helpers.get_name(out, key="model")
Example #8
def upload_handler():
    """
    Endpoint to upload files asynchronously.
    """
    path = request.form.get('path') or ''
    if request.method == 'POST':
        files = request.files.getlist('files')
        # Names already present in the target directory, used by
        # helpers.get_name to pick a non-colliding filename.
        filenames = os.listdir(os.path.join(CONTENT_FOLDER, path))
        for file in files:
            if file and helpers.is_allowed_file(file.filename):
                filename = helpers.get_name(file.filename, filenames)
                file.save(os.path.join(app.config['UPLOAD_FOLDER'], path, filename))
    return helpers.generate_error('0', 'Upload successful.')
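Note that this upload_handler calls helpers.get_name(file.filename, filenames) with a two-argument signature, unlike the single-argument output parsers in the test examples above, which suggests a filename-deduplication helper from a different project. A hypothetical sketch of that behavior:

import os


def get_name(filename, existing):
    """Return `filename`, or `stem (n).ext` for the smallest n that avoids
    colliding with a name already in `existing` (hypothetical behavior)."""
    if filename not in existing:
        return filename
    stem, ext = os.path.splitext(filename)
    n = 1
    while f"{stem} ({n}){ext}" in existing:
        n += 1
    return f"{stem} ({n}){ext}"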
Example #9
def test_ucaip_generated_upload_model_explain_image_managed_container_sample(
        capsys, shared_state):

    upload_model_explain_image_managed_container_sample.upload_model_explain_image_managed_container_sample(
        display_name=DISPLAY_NAME,
        artifact_uri=ARTIFACT_URI,
        container_spec_image_uri=IMAGE_URI,
        project=PROJECT_ID,
        input_tensor_name=INPUT_TENSOR_NAME,
        output_tensor_name=OUTPUT_TENSOR_NAME)

    out, _ = capsys.readouterr()

    shared_state["model_name"] = helpers.get_name(out, key="model")
Example #11
def test_create_hyperparameter_tuning_job_python_package_sample(capsys, shared_state):

    create_hyperparameter_tuning_job_python_package_sample.create_hyperparameter_tuning_job_python_package_sample(
        project=PROJECT_ID,
        display_name=DISPLAY_NAME,
        executor_image_uri=EXECUTOR_IMAGE_URI,
        package_uri=PACKAGE_URI,
        python_module=PYTHON_MODULE,
    )

    out, _ = capsys.readouterr()
    assert "response" in out

    shared_state["hyperparameter_tuning_job_name"] = helpers.get_name(out)
Example #12
def test_ucaip_generated_create_training_pipeline_video_object_tracking_sample(
        capsys, shared_state):

    create_training_pipeline_video_object_tracking_sample.create_training_pipeline_video_object_tracking_sample(
        project=PROJECT_ID,
        display_name=DISPLAY_NAME,
        dataset_id=DATASET_ID,
        model_display_name=f"Temp Model for {DISPLAY_NAME}",
    )

    out, _ = capsys.readouterr()
    assert "response:" in out

    # Save resource name of the newly created training pipeline
    shared_state["training_pipeline_name"] = helpers.get_name(out)
Example #13
def test_create_training_pipeline_video_action_recognition_sample(
        capsys, shared_state):
    create_training_pipeline_video_action_recognition_sample.create_training_pipeline_video_action_recognition_sample(
        project=PROJECT_ID,
        display_name=DISPLAY_NAME,
        dataset_id=DATASET_ID,
        model_display_name=MODEL_DISPLAY_NAME,
        model_type=MODEL_TYPE,
    )

    out, _ = capsys.readouterr()
    assert "response:" in out

    # Save resource name of the newly created training pipeline
    shared_state["training_pipeline_name"] = helpers.get_name(out)
Example #14
def test_ucaip_generated_create_training_pipeline_sample(capsys, shared_state):

    create_training_pipeline_tabular_classification_sample.create_training_pipeline_tabular_classification_sample(
        project=PROJECT_ID,
        display_name=DISPLAY_NAME,
        dataset_id=DATASET_ID,
        model_display_name=f"Temp Model for {DISPLAY_NAME}",
        target_column=TARGET_COLUMN,
    )

    out, _ = capsys.readouterr()
    assert "response:" in out

    # Save resource name of the newly created training pipeline
    shared_state["training_pipeline_name"] = helpers.get_name(out)
Example #15
def compute_covariance_matrix(model_name, to_json=True):
    model = gensim.models.Doc2Vec.load(model_name)

    doctags = list(model.docvecs.doctags)
    # Stack the document vectors into an (N, dim) matrix.
    X = np.array([model.docvecs[x] for x in doctags])

    # Despite the function's name, np.corrcoef returns the Pearson
    # correlation matrix: R[i, j] is the correlation between the
    # vectors for doctags[i] and doctags[j].
    R = np.corrcoef(X)

    if to_json:
        RR = {}
        for x, dx in enumerate(doctags):
            for y, dy in enumerate(doctags):
                RR[get_name(dx), get_name(dy)] = R[x, y]
        return doctags, RR
    else:
        return doctags, R
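A brief usage sketch (the model filename is hypothetical). One caveat worth noting: the to_json=True branch returns a dict keyed by (name, name) tuples, which json.dumps cannot serialize without converting the keys first.

# Hypothetical model file; compares every tagged document against the rest.
doctags, R = compute_covariance_matrix("enwiki_doc2vec.model", to_json=False)
# R is an (N, N) Pearson correlation matrix over the document vectors;
# R[i, j] near 1 means doctags[i] and doctags[j] have similar vectors.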
Example #16
def test_ucaip_generated_create_batch_prediction_tcn_sample(capsys, shared_state):

    model_name = f"projects/{PROJECT_ID}/locations/{LOCATION}/models/{MODEL_ID}"

    create_batch_prediction_job_text_classification_sample.create_batch_prediction_job_text_classification_sample(
        project=PROJECT_ID,
        display_name=DISPLAY_NAME,
        model_name=model_name,
        gcs_source_uri=GCS_SOURCE_URI,
        gcs_destination_output_uri_prefix=GCS_OUTPUT_URI,
    )

    out, _ = capsys.readouterr()

    # Save resource name of the newly created batch prediction job
    shared_state["batch_prediction_job_name"] = helpers.get_name(out)
Example #17
def test_ucaip_generated_create_training_pipeline_custom_job_sample(
        capsys, shared_state):

    create_training_pipeline_custom_job_sample.create_training_pipeline_custom_job_sample(
        project=PROJECT_ID,
        display_name=DISPLAY_NAME,
        model_display_name=f"Temp Model for {DISPLAY_NAME}",
        container_image_uri='gcr.io/ucaip-sample-tests/mnist-custom-job:latest',
        base_output_directory_prefix='gs://ucaip-samples-us-central1/training_pipeline_output',
    )

    out, _ = capsys.readouterr()
    assert "response:" in out

    # Save resource name of the newly created training pipeline
    shared_state["training_pipeline_name"] = helpers.get_name(out)
Example #18
def test_create_data_labeling_job_active_learning_sample(capsys, shared_state):

    create_data_labeling_job_active_learning_sample.create_data_labeling_job_active_learning_sample(
        project=PROJECT_ID,
        display_name=DISPLAY_NAME,
        dataset=f"projects/{PROJECT_ID}/locations/{LOCATION}/datasets/{DATASET_ID}",
        instruction_uri=INSTRUCTIONS_GCS_URI,
        inputs_schema_uri=INPUTS_SCHEMA_URI,
        annotation_spec=ANNOTATION_SPEC,
        api_endpoint=API_ENDPOINT,
    )

    out, _ = capsys.readouterr()

    # Save resource name of the newly created data labeling job
    shared_state["data_labeling_job_name"] = helpers.get_name(out)
Example #19
def test_ucaip_generated_deploy_model_sample(capsys, shared_state):

    assert shared_state["endpoint_name"] is not None

    # Deploy existing image classification model to endpoint
    deploy_model_sample.deploy_model_sample(
        project=PROJECT_ID,
        model_name=MODEL_NAME,
        deployed_model_display_name=f"temp_deploy_model_test_{uuid4()}",
        endpoint_id=shared_state["endpoint_name"].split("/")[-1],
    )

    # Store deployed model ID for undeploying
    out, _ = capsys.readouterr()
    assert "deploy_model_response" in out

    shared_state["deployed_model_id"] = helpers.get_name(out=out, key="id")
Example #20
def test_ucaip_generated_create_data_labeling_job_sample(capsys, shared_state):

    dataset_name = f"projects/{PROJECT_ID}/locations/{LOCATION}/datasets/{DATASET_ID}"

    create_data_labeling_job_video_sample.create_data_labeling_job_video_sample(
        project=PROJECT_ID,
        display_name=DISPLAY_NAME,
        instruction_uri=INSTRUCTIONS_GCS_URI,
        dataset=dataset_name,
        annotation_spec=ANNOTATION_SPEC,
        api_endpoint=API_ENDPOINT,
    )

    out, _ = capsys.readouterr()

    # Save resource name of the newly created data labeling job
    shared_state["data_labeling_job_name"] = helpers.get_name(out)
Example #21
def test_ucaip_generated_create_training_pipeline_sample(capsys, shared_state):

    shared_state["cancel_batch_prediction_job_timeout"] = 300

    create_training_pipeline_sample.create_training_pipeline_sample(
        project=PROJECT_ID,
        display_name=DISPLAY_NAME,
        training_task_definition=TRAINING_DEFINITION_GCS_PATH,
        dataset_id=DATASET_ID,
        model_display_name=f"Temp Model for {DISPLAY_NAME}",
    )

    out, _ = capsys.readouterr()
    assert "response:" in out

    # Save resource name of the newly created training pipeline
    shared_state["training_pipeline_name"] = helpers.get_name(out)
Example #22
def test_ucaip_generated_create_training_pipeline_sample(capsys, shared_state):

    # Cancellation can be slow to return; the slowest of 20 runs took 215 sec.
    shared_state["cancel_batch_prediction_job_timeout"] = 300

    create_training_pipeline_tabular_regression_sample.create_training_pipeline_tabular_regression_sample(
        project=PROJECT_ID,
        display_name=DISPLAY_NAME,
        dataset_id=DATASET_ID,
        model_display_name=f"Temp Model for {DISPLAY_NAME}",
        target_column=TARGET_COLUMN,
    )

    out, _ = capsys.readouterr()
    assert "response:" in out

    # Save resource name of the newly created training pipeline
    shared_state["training_pipeline_name"] = helpers.get_name(out)
Example #23
def test_ucaip_generated_upload_model_explain_tabular_managed_container_sample(
        capsys, shared_state):

    upload_model_explain_tabular_managed_container_sample.upload_model_explain_tabular_managed_container_sample(
        display_name=DISPLAY_NAME,
        artifact_uri=ARTIFACT_URI,
        container_spec_image_uri=IMAGE_URI,
        project=PROJECT_ID,
        input_tensor_name=INPUT_TENSOR_NAME,
        output_tensor_name=OUTPUT_TENSOR_NAME,
        feature_names=[
            "crim", "zn", "indus", "chas", "nox", "rm", "age", "dis", "rad",
            "tax", "ptratio", "b", "lstat"
        ])

    out, _ = capsys.readouterr()

    shared_state["model_name"] = helpers.get_name(out, key="model")
Example #24
def test_create_training_pipeline_custom_training_managed_dataset_sample(
        capsys, shared_state, pipeline_client):
    create_training_pipeline_custom_training_managed_dataset_sample.create_training_pipeline_custom_training_managed_dataset_sample(
        project=PROJECT_ID,
        display_name=DISPLAY_NAME,
        model_display_name=MODEL_DISPLAY_NAME,
        dataset_id=DATASET_ID,
        annotation_schema_uri=ANNOTATION_SCHEMA_URI,
        training_container_spec_image_uri=TRAINING_CONTAINER_SPEC_IMAGE_URI,
        model_container_spec_image_uri=MODEL_CONTAINER_SPEC_IMAGE_URI,
        base_output_uri_prefix=BASE_OUTPUT_URI_PREFIX,
    )

    out, _ = capsys.readouterr()
    assert "response:" in out

    # Save resource name of the newly created training pipeline
    shared_state["training_pipeline_name"] = helpers.get_name(out)
Example #25
def test_ucaip_generated_create_batch_prediction_job_bigquery_sample(
        capsys, shared_state):

    model_name = f"projects/{PROJECT_ID}/locations/{LOCATION}/models/{MODEL_ID}"

    create_batch_prediction_job_bigquery_sample.create_batch_prediction_job_bigquery_sample(
        project=PROJECT_ID,
        display_name=DISPLAY_NAME,
        model_name=model_name,
        bigquery_source_input_uri=BIGQUERY_SOURCE_INPUT_URI,
        bigquery_destination_output_uri=BIGQUERY_DESTINATION_OUTPUT_URI,
        instances_format=INSTANCES_FORMAT,
        predictions_format=PREDICTIONS_FORMAT,
    )

    out, _ = capsys.readouterr()

    # Save resource name of the newly created batch prediction job
    shared_state["batch_prediction_job_name"] = helpers.get_name(out)
Example #26
# pytest fixture: creates a training pipeline, yields its id, and deletes
# the pipeline on teardown.
@pytest.fixture
def training_pipeline_id(capsys):
    create_training_pipeline_sample.create_training_pipeline_sample(
        project=PROJECT_ID,
        display_name=DISPLAY_NAME,
        training_task_definition=TRAINING_DEFINITION_GCS_PATH,
        dataset_id=DATASET_ID,
        model_display_name=f"Temp Model for {DISPLAY_NAME}",
    )

    out, _ = capsys.readouterr()

    training_pipeline_name = helpers.get_name(out)

    assert "/" in training_pipeline_name

    training_pipeline_id = training_pipeline_name.split("/")[-1]

    yield training_pipeline_id

    delete_training_pipeline_sample.delete_training_pipeline_sample(
        project=PROJECT_ID, training_pipeline_id=training_pipeline_id)
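The yield plus the delete call after it make training_pipeline_id a pytest fixture: the pipeline is created once, its id is handed to the consuming test, and the teardown after yield deletes it when the test finishes. A hypothetical consumer (get_training_pipeline_sample is illustrative, not taken from this page):

# Hypothetical consumer of the fixture above.
def test_get_training_pipeline(capsys, training_pipeline_id):
    get_training_pipeline_sample.get_training_pipeline_sample(
        project=PROJECT_ID, training_pipeline_id=training_pipeline_id)
    out, _ = capsys.readouterr()
    assert "response" in out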
Example #27
def test_ucaip_generated_create_training_pipeline_sample(capsys, shared_state):

    create_training_pipeline_tabular_forecasting_sample.create_training_pipeline_tabular_forecasting_sample(
        project=PROJECT_ID,
        display_name=DISPLAY_NAME,
        dataset_id=DATASET_ID,
        model_display_name="permanent_tabular_forecasting_model",
        target_column=TARGET_COLUMN,
        time_series_identifier_column="county",
        time_column="date",
        static_columns=["state_name"],
        time_variant_past_only_columns=["deaths"],
        time_variant_past_and_future_columns=["date"],
        forecast_window_end=10,
    )

    out, _ = capsys.readouterr()
    assert "response:" in out

    # Save resource name of the newly created training pipeline
    shared_state["training_pipeline_name"] = helpers.get_name(out)
Example #28
def test_ucaip_generated_create_training_pipeline_sample(capsys, shared_state):

    shared_state["cancel_batch_prediction_job_timeout"] = 300

    create_training_pipeline_tabular_forecasting_sample.create_training_pipeline_tabular_forecasting_sample(
        project=PROJECT_ID,
        display_name=DISPLAY_NAME,
        dataset_id=DATASET_ID,
        model_display_name="permanent_tabular_forecasting_model",
        target_column=TARGET_COLUMN,
        time_series_identifier_column="county",
        time_column="date",
        time_series_attribute_columns=["state_name"],
        unavailable_at_forecast=["deaths"],
        available_at_forecast=["date"],
        forecast_horizon=10,
    )

    out, _ = capsys.readouterr()
    assert "response:" in out

    # Save resource name of the newly created training pipeline
    shared_state["training_pipeline_name"] = helpers.get_name(out)
Example #29
import random
import sqlite3
import string

from PyQt5.QtWidgets import *
from PyQt5 import QtCore
from PyQt5.QtGui import *

import sys

from helpers import get_name

NAME = get_name().replace('-', '_')
letters = string.ascii_lowercase
GAME_NAME = ''.join(random.choice(letters) for _ in range(10))

from trainingWin import TrainingWin
from connect_game import ConnectGame
from create_game import CreateGame

training_window = None


# create a Window class
class Window(QMainWindow):
    # constructor
    def __init__(self):
        super().__init__()
        self.setGeometry(100, 100, 800, 600)
        self.setWindowTitle("New Game")
        self.main_menu()
Example #30
def clean_roster_data(version):

    if version == '11':
        ext = '.xls'
    else:
        ext = '.xlsx'

    teams = helpers.get_teams(version)

    for team in teams:
        print(team)

        if version == '2002':
            df = pd.read_excel('MaddenRosters/' + version + '/' + team + ext,
                               index_col=0)
            df = df[['Position', 'First Name', 'Last Name', 'Overall']]
            df['Version'] = version
            df.to_csv('MaddenRosters/' + version + '/' + team + '.csv')

        if version == '2003':
            df = pd.read_excel('MaddenRosters/' + version + '/' + team + ext,
                               index_col=0)
            df['First Name'], df['Last Name'] = df['Name'].str.split(' ',
                                                                     1).str
            df.rename(columns={'Overall Rating': 'Overall'}, inplace=True)
            df = df[['Position', 'First Name', 'Last Name', 'Overall']]
            df['Version'] = version
            df.to_csv('MaddenRosters/' + version + '/' + team + '.csv')

        if version == '2004':
            df = pd.read_excel('MaddenRosters/' + version + '/' + team + ext,
                               index_col=0)
            df['First Name'], df['Last Name'] = df['Name'].str.split(' ',
                                                                     1).str
            df = df[['Position', 'First Name', 'Last Name', 'Overall']]
            df['Version'] = version
            df.to_csv('MaddenRosters/' + version + '/' + team + '.csv')

        if version in ('2005', '06'):
            df = pd.read_excel('MaddenRosters/' + version + '/' + team + ext,
                               index_col=None)
            df['Team'] = helpers.get_name(team)
            df.rename(columns={
                'FIRSTNAME': 'First Name',
                'LASTNAME': 'Last Name',
                'OVERALLRATING': 'Overall'
            },
                      inplace=True)
            df = df[['Team', 'Position', 'First Name', 'Last Name', 'Overall']]
            df['Version'] = version
            df.to_csv('MaddenRosters/' + version + '/' + team + '.csv',
                      index=False)

        if version == '07':
            df = pd.read_excel('MaddenRosters/' + version + '/' + team + ext,
                               index_col=None)
            df['Team'] = helpers.get_name(team)
            df.rename(columns={
                'PLYR_FIRSTNAME': 'First Name',
                'PLYR_LASTNAME': 'Last Name',
                'PLYR_OVERALLRATING': 'Overall'
            },
                      inplace=True)
            df = df[['Team', 'Position', 'First Name', 'Last Name', 'Overall']]
            df['Version'] = version
            df.to_csv('MaddenRosters/' + version + '/' + team + '.csv',
                      index=False)

        if version == '08':
            df = pd.read_excel('MaddenRosters/' + version + '/' + team + ext,
                               index_col=None)
            df['Team'] = helpers.get_name(team)
            df.rename(columns={
                'First_Name': 'First Name',
                'Last_Name': 'Last Name',
                'Overall_Rating': 'Overall'
            },
                      inplace=True)
            df = df[['Team', 'Position', 'First Name', 'Last Name', 'Overall']]
            df['Version'] = version
            df.to_csv('MaddenRosters/' + version + '/' + team + '.csv',
                      index=False)

        if version == '09':
            df = pd.read_excel('MaddenRosters/' + version + '/' + team + ext,
                               index_col=None)
            df['Team'] = helpers.get_name(team)
            df.rename(columns={
                'FIRSTNAME': 'First Name',
                'LASTNAME': 'Last Name',
                'OVERALL': 'Overall'
            },
                      inplace=True)
            df = df[['Team', 'Position', 'First Name', 'Last Name', 'Overall']]
            df['Version'] = version
            df.to_csv('MaddenRosters/' + version + '/' + team + '.csv',
                      index=False)

        if version == '10':
            df = pd.read_excel('MaddenRosters/' + version + '/' + team + ext,
                               index_col=0)
            df.rename(columns={
                'First': 'First Name',
                'Last': 'Last Name',
                'OVR': 'Overall',
                'POS': 'Position'
            },
                      inplace=True)
            df = df[['Position', 'First Name', 'Last Name', 'Overall']]
            df['Version'] = version
            df.to_csv('MaddenRosters/' + version + '/' + team + '.csv')

        if version == '11':
            df = pd.read_excel('MaddenRosters/' + version + '/' + team + ext,
                               index_col=0)
            df.rename(columns={
                'FIRST NAME': 'First Name',
                'LAST NAME': 'Last Name',
                'OVERALL RATING': 'Overall',
                'POSITION': 'Position'
            },
                      inplace=True)
            df = df[['Position', 'First Name', 'Last Name', 'Overall']]
            df['Version'] = version
            df.to_csv('MaddenRosters/' + version + '/' + team + '.csv')

        if version == '12':
            df = pd.read_excel('MaddenRosters/' + version + '/' + team + ext,
                               index_col=None)
            df['Team'] = helpers.get_name(team)
            df = df.drop(df[df.Name == 'Name'].index)
            df.dropna(subset=['Name'], inplace=True)
            df['First Name'], df['Last Name'] = df['Name'].str.split(' ',
                                                                     1).str
            df = df[['Team', 'Position', 'First Name', 'Last Name', 'Overall']]
            df['Version'] = version
            df.to_csv('MaddenRosters/' + version + '/' + team + '.csv',
                      index=False)

        if version in ('13', '17', '18', '25'):
            df = pd.read_excel('MaddenRosters/' + version + '/' + team + ext,
                               index_col=0)
            df = df[['Position', 'First Name', 'Last Name', 'Overall']]
            df['Version'] = version
            df.to_csv('MaddenRosters/' + version + '/' + team + '.csv')

        if version == '15':
            df = pd.read_excel('MaddenRosters/' + version + '/' + team + ext,
                               index_col=None)
            df['Team'] = helpers.get_name(team)
            df.rename(columns={
                'FIRST': 'First Name',
                'LAST': 'Last Name',
                'OVERALL RATING': 'Overall',
                'POSITION': 'Position'
            },
                      inplace=True)
            df = df[['Team', 'Position', 'First Name', 'Last Name', 'Overall']]
            df['Version'] = version
            df.to_csv('MaddenRosters/' + version + '/' + team + '.csv',
                      index=False)

        if version == '16':
            df = pd.read_excel('MaddenRosters/' + version + '/' + team + ext,
                               index_col=None)
            df['Team'] = helpers.get_name(team)
            df.rename(columns={'OVR': 'Overall'}, inplace=True)
            df = df[['Team', 'Position', 'First Name', 'Last Name', 'Overall']]
            df['Version'] = version
            df.to_csv('MaddenRosters/' + version + '/' + team + '.csv',
                      index=False)

        if version == '19':
            df = pd.read_excel('MaddenRosters/' + version + '/' + team + ext,
                               index_col=0)
            df['First Name'], df['Last Name'] = df['Name'].str.split(' ',
                                                                     1).str
            df = df[['Position', 'First Name', 'Last Name', 'Overall']]
            df['Version'] = version
            df.to_csv('MaddenRosters/' + version + '/' + team + '.csv')

        if version == '20':
            df = pd.read_excel('MaddenRosters/' + version + '/' + team + ext,
                               index_col=None)
            df['Team'] = helpers.get_name(team)
            df['First Name'], df['Last Name'] = df['Name'].str.split(' ',
                                                                     1).str
            df = df[['Team', 'Position', 'First Name', 'Last Name', 'Overall']]
            df['Version'] = version
            df.to_csv('MaddenRosters/' + version + '/' + team + '.csv',
                      index=False)
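The df['Name'].str.split(' ', 1).str tuple-unpacking used throughout clean_roster_data relies on iterating the .str accessor, which recent pandas releases no longer allow; the expand=True form below is the modern equivalent:

# Modern equivalent of:
#   df['First Name'], df['Last Name'] = df['Name'].str.split(' ', 1).str
df[['First Name', 'Last Name']] = df['Name'].str.split(' ', n=1, expand=True)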
Example #31
def get_name_and_pw():
    print("You will need your login information for https://www.dmr.nd.gov/oilgas/")
    global username
    global password
    username = get_name()
    password = get_pass()
Example #32
# session. If a session referred to by i has a value of 5, it
# means that it has 5 correlated objects with a predecessor
# exactly i time steps behind.
session_sequences = {}
# keeps track of the number of events taken into account for one
# session, in order to normalize
session_norm = {}
# iterate over the files (sessions)
for file_path in hp.get_csv():
    session_id = hp.get_id(file_path)
    # the id is an integer
    session_id = int(session_id)
    session_norm[session_id] = 0
    # the name of the session is used as a table name in the
    # database
    session_name = hp.get_name(file_path)
    # array of arrays, stores for each coordinated object the
    # list of timespans between student switches
    event_time_differences = []

    con = lite.connect('mysteries.db')
    with con:
        cur = con.cursor()
        # Gets the list of all distinct objects
        cur.execute("\
            SELECT distinct_objects FROM (\
                SELECT DISTINCT object_nature || object_id\
                    AS distinct_objects\
                FROM %s\
                UNION\
                SELECT DISTINCT 'group' || on_group\