Example #1
    def test_sum_expenses_two_rows(self):
        da = DataAccess(database=TEST_DB)
        da.insert(**TEST_ROW_1)
        da.insert(**TEST_ROW_2)
        sum_result = da.sum_expenses()
        da.close()
        self.assertAlmostEqual(sum_result, 26.99)
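The TEST_DB and TEST_ROW_* fixtures referenced by these tests are not part of the snippets. A minimal sketch of what TEST_ROW_1 and TEST_ROW_2 might look like, with field names borrowed from the insert_expense example below (Example #13) and amounts chosen to satisfy the 12.75 and 26.99 assertions; this is an assumption, not the original fixture file:

# Hypothetical fixtures, assuming the expense schema used in insert_expense
# (description, amount, file_path, date). Amounts chosen so that TEST_ROW_1
# alone sums to 12.75 and TEST_ROW_1 + TEST_ROW_2 to 26.99.
TEST_DB = 'test_expenses.db'
TEST_ROW_1 = {'description': 'lunch', 'amount': 12.75,
              'file_path': '/tmp/receipt1.png', 'date': '2020-01-02'}
TEST_ROW_2 = {'description': 'taxi', 'amount': 14.24,
              'file_path': '/tmp/receipt2.png', 'date': '2020-01-03'}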
Example #2
def get_data_heading(model_id,
                     driver_id,
                     repeat,
                     test=False,
                     moving_average_window=3,
                     stops=False,
                     version=1):
    seed = random.Random(x=driver_id + model_id)
    da = DataAccess()
    if test:
        raise Exception

    driver_train, driver_test = da.get_rides_split(driver_id,
                                                   settings.BIG_CHUNK)
    other_train = list(
        da.get_random_rides(settings.BIG_CHUNK * repeat, driver_id, seed=seed))
    other_test = list(da.get_random_rides(settings.SMALL_CHUNK, driver_id))

    set1 = driver_train + other_train  # used for training
    set2 = driver_test + other_test  # used for testing

    set1 = [heading.get_ride_heading(ride, variations=True,
                                     moving_average_window=moving_average_window,
                                     stops=stops, version=version) for ride in set1]
    set2 = [util.get_list_string(
        heading.get_ride_heading(ride, moving_average_window=moving_average_window,
                                 stops=stops, version=version)) for ride in set2]

    set1 = list(itertools.chain(*set1))

    set1 = [util.get_list_string(r) for r in set1]

    vectorizer = CountVectorizer(min_df=2, ngram_range=(1, 15), max_df=1000000)
    set1 = vectorizer.fit_transform(set1)
    set2 = vectorizer.transform(set2)

    return set1, set2
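A brief usage sketch for get_data_heading; the model_id, driver_id and repeat values below are placeholders, and both return values are sparse matrices produced by the CountVectorizer:

# Hypothetical call with placeholder arguments.
train_X, test_X = get_data_heading(model_id=1, driver_id=10, repeat=2)
print(train_X.shape, test_X.shape)  # training and test feature matrices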
Example #3
def segment_driver(driver_id):
    ''' this generates the segments in settings.SEGMENTS_FOLDER[1] '''
    da = DataAccess()
    for ride_id_minus_1, ride in enumerate(da.get_rides(driver_id)):
        ride_id = ride_id_minus_1 + 1
        if da.skip_segment(driver_id, ride_id):
            continue

        # apply the Ramer-Douglas-Peucker algorithm
        ride = [p + [i]
                for i, p in enumerate(smoothen(ride))]  # enrich with timestamp
        ride = rdp(ride, epsilon=10)

        lengths = [
            util.euclidian_distance(ride[i - 1], ride[i])
            for i in xrange(1, len(ride))
        ]
        times = [ride[i][2] - ride[i - 1][2] for i in xrange(1, len(ride))]
        angles = [
            util.get_angle(ride[i - 2], ride[i - 1], ride[i])
            for i in xrange(2, len(ride))
        ]

        # bucket the values
        lengths = util.bucket(np.log(lengths), 25,
                              [2.2, 8])  # [int(l) for l in lengths]
        times = util.bucket(np.log(times), 20,
                            [1, 5.5])  # [int(t) for t in times]
        angles = util.bucket(angles, 30, [0, 180])  # [int(a) for a in angles]

        # write results
        da.write_ride_segments(driver_id, ride_id, lengths, times, angles)

    logging.info('finished segmenting driver %s' % driver_id)
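The util.bucket helper is not included in these examples. A minimal sketch of the behaviour implied by the calls above (clip the values to a range and map them onto n integer buckets), written as an assumption rather than the actual implementation:

import numpy as np

def bucket(values, n, value_range):
    """Hypothetical sketch of util.bucket: clip `values` to
    [low, high] and map each onto one of `n` integer buckets (0..n-1)."""
    low, high = value_range
    edges = np.linspace(low, high, n + 1)
    # digitize against the interior edges so results stay in 0..n-1
    return list(np.digitize(np.clip(values, low, high), edges[1:-1]))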
Example #4
def get_data_g_forces_v6(model_id, driver_id, repeat, test=False, version=1):
    seed = random.Random(x=driver_id + model_id)
    da = DataAccess()
    if test:
        set1 = list(da.get_rides(driver_id))  # first half of the train set
        set2 = list(
            da.get_random_rides(settings.BIG_CHUNK_TEST * repeat,
                                driver_id,
                                seed=seed))  # second half of the train set
    else:
        driver_train, driver_test = da.get_rides_split(driver_id,
                                                       settings.BIG_CHUNK)
        other_train = list(
            da.get_random_rides(settings.BIG_CHUNK * repeat,
                                driver_id,
                                seed=seed))
        other_test = list(da.get_random_rides(settings.SMALL_CHUNK, driver_id))

        set1 = driver_train + other_train  # used for training
        set2 = driver_test + other_test  # used for testing

    set1 = [util.get_g_forces_v4(ride, version=version) for ride in set1]
    set2 = [util.get_g_forces_v4(ride, version=version) for ride in set2]

    vectorizer = CountVectorizer(min_df=1, ngram_range=(1, 20))
    set1 = vectorizer.fit_transform(set1)
    set2 = vectorizer.transform(set2)

    return set1, set2
Example #5
def get_data_basic_accel(model_id, driver_id, repeat, test=False, version=1):
    seed = random.Random(x=driver_id + model_id)
    da = DataAccess()
    if test:
        set1 = list(da.get_rides(driver_id))
        set2 = list(
            da.get_random_rides(settings.BIG_CHUNK_TEST * repeat,
                                driver_id,
                                seed=seed))

    else:
        driver_train, driver_test = da.get_rides_split(driver_id,
                                                       settings.BIG_CHUNK)
        other_train = list(
            da.get_random_rides(settings.BIG_CHUNK * repeat,
                                driver_id,
                                seed=seed))
        other_test = list(da.get_random_rides(settings.SMALL_CHUNK, driver_id))

        set1 = driver_train + other_train
        set2 = driver_test + other_test

    set1 = [util.build_features_acc(ride, version=version) for ride in set1]
    set2 = [util.build_features_acc(ride, version=version) for ride in set2]
    return np.array(set1), np.array(set2)
Example #6
def get_data_acc4acc(model_id, driver_id, repeat, test=False, version=1):
    seed = random.Random(x=driver_id + model_id)
    da = DataAccess()
    if test:
        set1 = list(da.get_rides(driver_id))
        set2 = list(
            da.get_random_rides(settings.BIG_CHUNK_TEST * repeat,
                                driver_id,
                                seed=seed))
    else:
        driver_train, driver_test = da.get_rides_split(driver_id,
                                                       settings.BIG_CHUNK)
        other_train = list(
            da.get_random_rides(settings.BIG_CHUNK * repeat,
                                driver_id,
                                seed=seed))
        other_test = list(da.get_random_rides(settings.SMALL_CHUNK, driver_id))

        set1 = driver_train + other_train
        set2 = driver_test + other_test

    set1 = [
        util.get_acc4acc_words(ride, step=3, version=version) for ride in set1
    ]
    set2 = [
        util.get_acc4acc_words(ride, step=3, version=version) for ride in set2
    ]

    max_ngram = 15 if version == 1 else 20
    vectorizer = CountVectorizer(min_df=1, ngram_range=(1, max_ngram))
    set1 = vectorizer.fit_transform(set1)
    set2 = vectorizer.transform(set2)

    return set1, set2
Example #7
def evaluate_iex_stocks():
    print('Started evaluating IEX stocks ...', flush=True)
    filename = 'sandp_top_250'
    os.environ["IEX_API_KEY"] = config.data_iex_api_key
    data_access = DataAccess(filename)
    global iex_stocks
    iex_stocks = evaluate_stocks('iex', f'{filename}.csv', data_access)
Example #8
def get_data_fft(model_id, driver_id, repeat, test=False, version=1):
    seed = random.Random(x=driver_id + model_id)
    da = DataAccess()
    if test:
        set1 = list(da.get_rides(driver_id))
        set2 = list(
            da.get_random_rides(settings.BIG_CHUNK_TEST * repeat,
                                driver_id,
                                seed=seed))

    else:
        driver_train, driver_test = da.get_rides_split(driver_id,
                                                       settings.BIG_CHUNK)
        other_train = list(
            da.get_random_rides(settings.BIG_CHUNK * repeat,
                                driver_id,
                                seed=seed))
        other_test = list(da.get_random_rides(settings.SMALL_CHUNK, driver_id))

        set1 = driver_train + other_train
        set2 = driver_test + other_test

    if version == 1:
        set1 = [util.fft(ride) for ride in set1]
        set2 = [util.fft(ride) for ride in set2]
    else:
        set1 = [util.fft_strip(ride) for ride in set1]
        set2 = [util.fft_strip(ride) for ride in set2]

    return np.array(set1), np.array(set2)
Example #9
    def export_video_ids_json(self):
        da = DataAccess()
        videos = da.get_all_videos(sort=True)

        vids = [video["id"] for video in videos]

        with open(os.path.join(self.export_dir, "video_ids.txt"), mode="w") as f:
            f.write(json.dumps(vids, indent=2))
Example #10
def test_model_heading(model_id, driver_id, Model, get_data, repeat):
    seed = random.Random(x=driver_id + model_id)
    da = DataAccess()

    set1 = list(da.get_rides(driver_id))  # first half of the train set
    set2 = list(
        da.get_random_rides(settings.BIG_CHUNK_TEST * repeat,
                            driver_id,
                            seed=seed))  # second half of the train set

    moving_average_window = 6 if get_data == get_data_heading_v2 else 3
    set1 = [heading.get_ride_heading(ride, variations=True,
                                     moving_average_window=moving_average_window)
            for ride in set1]
    set2 = [heading.get_ride_heading(ride, variations=True,
                                     moving_average_window=moving_average_window)
            for ride in set2]

    set1 = [[util.get_list_string(r) for r in four_pack] for four_pack in set1]
    set2 = [[util.get_list_string(r) for r in four_pack] for four_pack in set2]

    vectorizer = CountVectorizer(min_df=2, ngram_range=(1, 15), max_df=1000000)
    vectorizer.fit([r[0] for r in set1])
    rides = [[vectorizer.transform([r])[0] for r in four_pack]
             for four_pack in set1]
    other_rides = [[vectorizer.transform([r])[0] for r in four_pack]
                   for four_pack in set2]
    other_rides = list(itertools.chain(*other_rides))

    rides = np.array(rides)

    trainY = ([1] * settings.BIG_CHUNK_TEST * 4 * repeat +
              [0] * settings.BIG_CHUNK_TEST * 4 * repeat)
    kf = KFold(200,
               n_folds=settings.FOLDS,
               shuffle=True,
               random_state=driver_id)
    predictions = ['bug'] * 200
    for train_fold, test_fold in kf:
        trainX = rides[train_fold]
        trainX = scipy.sparse.vstack(
            list(itertools.chain(*trainX)) * repeat + \
            other_rides
        )
        testX = scipy.sparse.vstack([r[0] for r in rides[test_fold]])

        assert (trainX.shape[0] == len(trainY))
        assert (testX.shape[0] == settings.SMALL_CHUNK_TEST)

        model = Model(trainX, trainY, driver_id)
        fold_predictions = model.predict(testX)
        for i, v in enumerate(test_fold):
            predictions[v] = fold_predictions[i]

    predictions = np.array(predictions)
    if settings.ENABLE_CACHE:
        util.cache_results(Model, get_data, driver_id, True, predictions,
                           repeat)
    return driver_id, predictions
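The KFold(200, n_folds=..., shuffle=True, ...) object above is iterated directly, which relies on the pre-0.18 sklearn.cross_validation API. On current scikit-learn the equivalent loop would look roughly like this sketch (settings.FOLDS and driver_id come from the function above):

import numpy as np
from sklearn.model_selection import KFold

kf = KFold(n_splits=settings.FOLDS, shuffle=True, random_state=driver_id)
for train_fold, test_fold in kf.split(np.arange(200)):
    pass  # same per-fold training and prediction as in test_model_heading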
Example #11
def get_data_segment_angles(model_id,
                            driver_id,
                            repeat,
                            test=False,
                            segment_version=1,
                            extra=((1, 1), 2)):
    seed = random.Random(x=driver_id + model_id)
    da = DataAccess()
    ngram_range, min_df = extra

    if test:
        set1 = list(da.get_rides_segments(driver_id, version=segment_version))
        set2 = list(
            da.get_random_rides(settings.BIG_CHUNK_TEST * repeat,
                                driver_id,
                                segments=True,
                                version=segment_version,
                                seed=seed))
    else:
        driver_train, driver_test = da.get_rides_split(driver_id,
                                                       settings.BIG_CHUNK,
                                                       segments=True,
                                                       version=segment_version)
        other_train = list(
            da.get_random_rides(settings.BIG_CHUNK * repeat,
                                driver_id,
                                segments=True,
                                version=segment_version,
                                seed=seed))
        other_test = list(
            da.get_random_rides(settings.SMALL_CHUNK,
                                driver_id,
                                segments=True,
                                version=segment_version))

        set1 = driver_train + other_train
        set2 = driver_test + other_test

    # create features for each (segment, angle, segment) tuple
    set1 = [[
        '%s_%s_%s' % (d[0][i - 1], d[1][i - 1], d[0][i])
        for i in xrange(1, len(d[0]))
    ] for d in set1]
    set2 = [[
        '%s_%s_%s' % (d[0][i - 1], d[1][i - 1], d[0][i])
        for i in xrange(1, len(d[0]))
    ] for d in set2]

    set1 = [util.get_list_string(d) for d in set1]
    set2 = [util.get_list_string(d) for d in set2]

    vectorizer = CountVectorizer(min_df=min_df, ngram_range=ngram_range)
    set1 = vectorizer.fit_transform(set1)
    set2 = vectorizer.transform(set2)
    return set1, set2
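For intuition, a tiny worked example of the '(segment, angle, segment)' tokens built by the comprehensions above, run on a made-up bucketed ride d = (lengths, angles):

# Hypothetical bucketed ride: d[0] holds segment lengths, d[1] the angles between them.
d = ([3, 7, 5], [12, 20])
tokens = ['%s_%s_%s' % (d[0][i - 1], d[1][i - 1], d[0][i])
          for i in range(1, len(d[0]))]
print(tokens)  # ['3_12_7', '7_20_5']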
Example #12
    def export_video_ids_tsv(self):
        da = DataAccess()
        videos = da.get_all_videos(sort=True)
        with open(
            os.path.join(self.export_dir, "vids.tsv"), mode="w", encoding="utf-8"
        ) as f:
            f.write(f"video_id\tvideo_title\n")

            for video in videos:
                vtitle = video["snippet"]["title"]
                f.write(f"{video['id']}\t{vtitle}\n")
Example #13
    def insert_expense(self):
        insert_data_access = DataAccess()
        insert_dict = {
            'description': self.description.get(),
            'amount': self.amount.get(),
            'file_path': self.receipt.get(),
            'date': self.date.get(),
        }
        insert_data_access.insert(**insert_dict)
        insert_data_access.close()
        self.sum += float(self.amount.get())
        self.sum_label.set(format_sum_string(self.sum))
        self.add_window.destroy()
Example #14
def get_data_segment_lengths(model_id,
                             driver_id,
                             repeat,
                             test=False,
                             segment_version=1,
                             extra=((1, 8), 1)):
    seed = random.Random(x=driver_id + model_id)
    da = DataAccess()
    ngram_range, min_df = extra

    if test:
        set1 = list(da.get_rides_segments(driver_id, version=segment_version))
        set2 = list(
            da.get_random_rides(settings.BIG_CHUNK_TEST * repeat,
                                driver_id,
                                segments=True,
                                version=segment_version,
                                seed=seed))
    else:
        driver_train, driver_test = da.get_rides_split(driver_id,
                                                       settings.BIG_CHUNK,
                                                       segments=True,
                                                       version=segment_version)
        other_train = list(
            da.get_random_rides(settings.BIG_CHUNK * repeat,
                                driver_id,
                                segments=True,
                                version=segment_version,
                                seed=seed))
        other_test = list(
            da.get_random_rides(settings.SMALL_CHUNK,
                                driver_id,
                                segments=True,
                                version=segment_version))

        set1 = driver_train + other_train
        set2 = driver_test + other_test

    # keep only lengths
    set1 = [d[0] for d in set1]
    set2 = [d[0] for d in set2]

    # convert to text
    set1 = [util.get_list_string(d) for d in set1]
    set2 = [util.get_list_string(d) for d in set2]

    vectorizer = CountVectorizer(min_df=min_df, ngram_range=ngram_range)
    set1 = vectorizer.fit_transform(set1)
    set2 = vectorizer.transform(set2)
    return set1, set2
Example #15
def get_data_movements_v1(model_id,
                          driver_id,
                          repeat,
                          test=False,
                          step=5,
                          tf=False,
                          version=1,
                          extra=((1, 5), 2)):
    seed = random.Random(x=driver_id + model_id)
    da = DataAccess()
    ngram_range, min_df = extra

    if test:
        set1 = list(da.get_rides(driver_id))
        set2 = list(
            da.get_random_rides(settings.BIG_CHUNK_TEST * repeat,
                                driver_id,
                                segments=False,
                                seed=seed))
    else:
        driver_train, driver_test = da.get_rides_split(driver_id,
                                                       settings.BIG_CHUNK,
                                                       segments=False)
        other_train = list(
            da.get_random_rides(settings.BIG_CHUNK * repeat,
                                driver_id,
                                segments=False,
                                seed=seed))
        other_test = list(
            da.get_random_rides(settings.SMALL_CHUNK,
                                driver_id,
                                segments=False))

        set1 = driver_train + other_train
        set2 = driver_test + other_test

    # keep only lengths and convert to text
    set1 = [util.build_features3(r, step=step, version=version) for r in set1]
    set2 = [util.build_features3(r, step=step, version=version) for r in set2]

    if tf:
        vectorizer = TfidfVectorizer(min_df=min_df, ngram_range=ngram_range)
    else:
        vectorizer = CountVectorizer(min_df=min_df, ngram_range=ngram_range)

    set1 = vectorizer.fit_transform(set1)
    set2 = vectorizer.transform(set2)

    return set1, set2
Example #16
    def get_videos_for_pitems(self, pitems: List) -> List:
        print("Requesting videos for playlist items.")

        vids: List[str] = [
            pitem["contentDetails"]["videoId"] for pitem in pitems
        ]
        data = []

        # Filter out videos we already have.
        da = DataAccess()
        vids = [vid for vid in vids if not da.have_video(vid)]

        for items in gen_resources_for_ids(
                self.youtube.videos,
                vids,
                part="snippet,statistics",
        ):
            data += items

        return data
Example #17
def get_data_g_forces_v1(model_id,
                         driver_id,
                         repeat,
                         test=False,
                         min_df=1,
                         ngram_range=(1, 10),
                         digitize=0):
    def process(ride, digitize):
        g_forces = util.get_g_forces(ride)
        if digitize:
            g_forces = np.digitize(g_forces, range(0, 800, digitize))
        return util.get_list_string(g_forces)

    seed = random.Random(x=driver_id + model_id)
    da = DataAccess()
    if test:
        set1 = list(da.get_rides(driver_id))  # first half of the train set
        set2 = list(
            da.get_random_rides(settings.BIG_CHUNK_TEST * repeat,
                                driver_id,
                                seed=seed))  # second half of the train set
    else:
        driver_train, driver_test = da.get_rides_split(driver_id,
                                                       settings.BIG_CHUNK)
        other_train = list(
            da.get_random_rides(settings.BIG_CHUNK * repeat,
                                driver_id,
                                seed=seed))
        other_test = list(da.get_random_rides(settings.SMALL_CHUNK, driver_id))

        set1 = driver_train + other_train  # used for training
        set2 = driver_test + other_test  # used for testing

    set1 = [process(ride, digitize) for ride in set1]
    set2 = [process(ride, digitize) for ride in set2]

    vectorizer = CountVectorizer(min_df=min_df, ngram_range=ngram_range)
    set1 = vectorizer.fit_transform(set1)
    set2 = vectorizer.transform(set2)

    return set1, set2
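A small worked example of the digitize step inside process above, using made-up g-force values and an arbitrary bucket width of 50:

import numpy as np

g_forces = [3.0, 120.5, 799.0]  # hypothetical g-force values
digitize = 50                   # hypothetical bucket width
print(np.digitize(g_forces, range(0, 800, digitize)).tolist())  # [1, 3, 16]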
Example #18
def main():
    # Disable OAuthlib's HTTPS verification when running locally.
    # *DO NOT* leave this option enabled in production.
    os.environ["OAUTHLIB_INSECURE_TRANSPORT"] = "1"

    # Get the API key as a CLI arg.
    api_key = sys.argv[1] if len(sys.argv) > 1 else ""
    if not api_key:
        raise Exception("No API key provided.")

    # Get credentials and create an API client
    youtube = YouTube(api_key)

    # Do stuff.
    da = DataAccess()

    pitems_dict = da.get_pitems_dict(OTHER_PLAYLIST_IDS)

    current_vid = "lM28rfsHge0"
    # save_threads(youtube, da, from_vid=current_vid, dry_run=False)
    # save_all_playlist_items(youtube, OTHER_PLAYLIST_IDS, dry_run=False)
    save_all_videos(youtube, pitems_dict, dry_run=False)
Example #19
def segment_driver_v2(driver_id):
    ''' this generates the segments in settings.SEGMENTS_FOLDER[2] '''
    da = DataAccess()
    for ride_id_minus_1, ride in enumerate(da.get_rides(driver_id)):
        ride_id = ride_id_minus_1 + 1
        if da.skip_segment(driver_id, ride_id, version=2):
            continue

        # apply the Ramer-Douglas-Peucker algorithm
        ride = [p + [i] for i, p in enumerate(ride)]  # enrich with timestamp
        ride = rdp(ride, epsilon=4)

        lengths = [
            util.euclidian_distance(ride[i - 1], ride[i])
            for i in range(1, len(ride))
        ]
        times = [ride[i][2] - ride[i - 1][2] for i in range(1, len(ride))]
        angles = [
            util.get_angle(ride[i - 2], ride[i - 1], ride[i])
            for i in range(2, len(ride))
        ]

        lengths = np.histogram(lengths,
                               bins=list(range(0, 700, 20)) + [1000000000])[0]
        times = np.histogram(times,
                             bins=list(range(0, 60, 4)) + [1000000000])[0]
        angles = np.histogram(angles, bins=list(range(0, 181, 20)))[0]

        # write results
        da.write_ride_segments(driver_id,
                               ride_id,
                               lengths,
                               times,
                               angles,
                               version=2)

    logging.info('finished segmenting driver %s' % driver_id)
Example #20
    def test_constructor_with_default_params(self):
        da = DataAccess()
        self.assertEqual(da.database, 'expenses.db')
        self.assertEqual(da.table_name, 'expenses')
        da.close()
Example #21
    def test_create_when_table_already_exists(self):
        da = DataAccess(database=TEST_DB)
        da.create()
        da.create()
        da.close()
        self.assertEqual(_table_exists(), 1)
Example #22
    def test_insert_adding_two_rows(self):
        da = DataAccess(database=TEST_DB)
        da.insert(**TEST_ROW_1)
        da.insert(**TEST_ROW_2)
        da.close()
        self.assertEqual(_get_row_count(), 2)
Example #23
def evaluate_sandp500_stocks():
    print('Started evaluating STOOQ.com stocks ...', flush=True)
    data_access = DataAccess('sandp500')
    global sandp500_stocks
    sandp500_stocks = evaluate_stocks('stooq', 'sandp500.csv', data_access)
Example #24
    def test_insert_no_kwargs_raises_InvalidDataError(self):
        da = DataAccess(database=TEST_DB)
        self.assertRaises(InvalidDataError, da.insert)
Example #25
    def test_sum_expenses_one_row(self):
        da = DataAccess(database=TEST_DB)
        da.insert(**TEST_ROW_1)
        sum_result = da.sum_expenses()
        da.close()
        self.assertEqual(sum_result, 12.75)
Example #26
def get_sum():
    sum_data_access = DataAccess()
    sum_result = sum_data_access.sum_expenses()
    sum_data_access.close()
    return sum_result
Example #27
    def __init__(self):
        connection_string = SvcConfig.db_connection_str
        data_access = DataAccess(connection_string)
        self.data_access = data_access
Example #28
    def test_sum_expenses_missing_table_is_zero(self):
        da = DataAccess(database=TEST_DB)
        sum_result = da.sum_expenses()
        da.close()
        self.assertEqual(sum_result, 0)
Example #29
    def test_insert_extra_fields(self):
        da = DataAccess(database=TEST_DB)
        da.insert(**TEST_ROW_3)
        self.assertEqual(_get_row_count(), 1)
Example #30
def evaluate_fse_stocks():
    print('Started evaluating QUANDL stocks ...', flush=True)
    data_access = DataAccess('quandl_fse_stocks')
    global fse_stocks
    fse_stocks = evaluate_stocks('quandl', 'quandl_fse_stocks.csv',
                                 data_access)