def get_video_data(channel_id):
    yt_rss_url = "https://www.youtube.com/feeds/videos.xml?channel_id=" + channel_id
    feed = fp.parse(yt_rss_url)
    channel_lang = feed["feed"]["title_detail"]["language"]
    print(feed["feed"])
    entries = feed["entries"]
    channels_timestamps = "channels_timestamps.csv"
    # clear any existing queue before start
    queue = []
    # read contents of channels_timestamps.csv, create list object of contents
    ct = open(channels_timestamps, "r")
    ctr = ct.read().split("\n")
    ct.close()
    ctr_line = ""
    channel_found = False
    # check if channel ID is found in channels_timestamps.csv
    for line in ctr:
        line_list = line.split(',')
        if channel_id == line_list[0]:
            channel_found = True
            ctr_line = line
            break
    if not channel_found:
        print("new channel added to config: " + channel_id)
    print(channel_id)
    # iterate through video entries for channel, parse data into objects for use
    for pos, i in enumerate(reversed(entries)):
        published = i["published"]
        updated = i["updated"]
        if not channel_found:
            # add the video to the queue
            queue.append(i)
            ctr_line = channel_id + "," + published + "," + updated
            # add the new line to ctr for adding to channels_timestamps later
            ctr.append(ctr_line)
            channel_found = True
        # if the channel exists in channels_timestamps, update "published" time in the channel line
        else:
            published_int = utils.convert_timestamp(published)
            ctr_line_list = ctr_line.split(",")
            line_published_int = utils.convert_timestamp(ctr_line_list[1])
            if published_int > line_published_int:
                # update the timestamp in the line for the channel in channels_timestamps,
                ctr.remove(ctr_line)
                ctr_line = channel_id + "," + published + "," + updated
                ctr.append(ctr_line)
                # and add current videos to queue.
                queue.append(i)
        print(published)
    # write the new channels and timestamps line to channels_timestamps.csv
    ct = open(channels_timestamps, "w")
    for line in ctr:
        if line != '':
            ct.write(line + "\n")
    ct.close()
    return queue, channel_lang
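A hypothetical usage sketch for the function above, assuming fp is feedparser and utils supplies the convert_timestamp helper exercised by the tests below; the channel ID is a placeholder:

# Hypothetical call; the channel ID is a placeholder.
queue, channel_lang = get_video_data("UCxxxxxxxxxxxxxxxxxxxxxx")
for video in queue:
    print(video["title"], video["published"])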
Example #2
 def test_timestamp_transform(self):
     """
     it converts the timestamp to seconds if it was passed in ms
     """
     now = datetime.now()
     timestamp_ms = int(datetime.timestamp(now)) * 1000
     timestamp_s = int(datetime.timestamp(now))
     result_ms = utils.convert_timestamp(timestamp_ms)
     result_s = utils.convert_timestamp(timestamp_s)
     self.assertEqual(result_ms.year, now.year)
     self.assertEqual(result_s.year, now.year)
Example #3
 def extract_candlestick_from_wss(data: dict) -> 'Candlestick':
     return Candlestick(
         open_time=convert_timestamp(data.get("t")),
         open=data.get("o"),
         high=data.get("h"),
         low=data.get("l"),
         close=data.get("c"),
         volume=data.get("v"),
         close_time=convert_timestamp(data.get("T")),
         quote_asset_volume=data.get("q"),
         trades_amount=data.get("n"),
         pair=data.get("s"),
         interval=data.get("i"),
     )
Example #4
    def run(self, dispatcher, tracker, domain):

        try:
            slots = tracker.current_slot_values()
            slot_time = slots['time']
            f_date = convert_timestamp(slot_time)
            date_s = f_date.strftime("%Y-%m-%d")
            str_date = f_date.strftime('%B %d, %Y')
        except Exception:  # fall back to today's date if the time slot is missing or malformed
            f_date = date.today()
            date_s = f_date.strftime("%Y-%m-%d")
            str_date = f_date.strftime('%B %d, %Y')
            # dispatcher.utter_message(text='Please enter the date properly')
            # return [AllSlotsReset()]

        try:
            doc = get_doc(date_s)
            # st = f"""DATE: {date}\nAir Temparature: {doc['airTemp']}\nSoil Temparature: {doc['soilTemp']}\nMoisture: {doc['moisture']}\nPressure: {doc['pressure']}\nHumidity: {doc['humidity']}\nPhosphorus: {doc['phosphorus']}\nNitrogen: {doc['nitrogen']}\nPotassium: {doc['potassium']}\nSolar Radiation: {doc['solarRad']}\nSalinity: {doc['salinity']}\nPH: {doc['pH']}"""
            st = f'Sensor data on {str_date}'
            for key in param_arr:
                st += '\n{:<12}: {:.2f}'.format(key, float(doc[key]))
            dispatcher.utter_message(text=st)
        except Exception:  # no sensor document for that date
            dispatcher.utter_message(text='No data recorded on ' + str_date)

        return [AllSlotsReset()]
Example #5
    def extract_candlestick_from_api(response, pair, interval):
        (
            open_time,
            open,
            high,
            low,
            close,
            volume,
            close_time,
            quote_asset_volume,
            trades_amount,
            *_
         ) = response

        # convert timestamps to datetime
        open_time, close_time = convert_timestamp([open_time, close_time])

        return Candlestick(
            open_time,
            open,
            high,
            low,
            close,
            volume,
            close_time,
            quote_asset_volume,
            trades_amount,
            pair,
            interval
        )
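For context, a hypothetical kline row in the field order the unpacking above expects (Binance-style /klines entries carry twelve fields, so the trailing *_ absorbs the surplus); values are placeholders, and in the source this function may well be a Candlestick classmethod:

# Hypothetical Binance-style kline row; values are placeholders and the
# field order matches the unpacking above. Extra fields land in *_.
response = [
    1609459200000,  # open_time (epoch ms)
    "29000.00",     # open
    "29100.00",     # high
    "28900.00",     # low
    "29050.00",     # close
    "12.345",       # volume
    1609459259999,  # close_time (epoch ms)
    "358000.00",    # quote_asset_volume
    1024,           # trades_amount
    "0", "0", "0",  # remaining fields, absorbed by *_
]
candle = extract_candlestick_from_api(response, "BTCUSDT", "1m")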
Example #6
 def add_json_data(self, json_trial):
     self.trial_num = json_trial['current_trial']
     trial_data = json_trial['trialdata']
     self.user_action = trial_data['action']
     self.pumps = trial_data['pumps']
     self.pop_point = trial_data['pop_point']
     self.balloon_num = trial_data['balloon']
     self.timestamp = convert_timestamp(json_trial['dateTime'])
Example #7
    def test_returns_datetime(self):
        """
        it returns datetime object
        """

        timestamp = datetime.timestamp(datetime.now())
        result = utils.convert_timestamp(timestamp)
        self.assertIsInstance(result, datetime)
Example #8
    def parse(self, curr: str):
        params = {"currency": curr}
        response = requests.get(self.url, params)
        result = response.json()

        item = dict(exchange="coinone",
                    name=curr,
                    price=int(result["last"]),
                    volume=round(float(result["volume"])),
                    date=convert_timestamp(result['timestamp']))
        self.items.append(item)
Example #9
    def test_returns_list_of_datetimes(self):
        """
        it returns correct list of datetime objects
        """

        timestamps = [datetime.timestamp(datetime.now()) for i in range(10)]
        result = utils.convert_timestamp(timestamps)
        self.assertIsInstance(result, list)
        self.assertEqual(len(result), len(timestamps))

        # all results are datetime
        for r in result:
            self.assertIsInstance(r, datetime)
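Taken together, the tests in Examples #2, #7 and #9 pin down this project's utils.convert_timestamp: it accepts seconds or milliseconds, single values or lists, and returns datetime objects (other snippets on this page clearly ship their own project-specific variants). A minimal sketch that would satisfy all three tests, assuming magnitude-based millisecond detection:

from datetime import datetime


def convert_timestamp(timestamp):
    """Convert a Unix timestamp (seconds or milliseconds) to datetime.

    Hypothetical sketch, not the project's actual implementation.
    Lists are converted element-wise.
    """
    if isinstance(timestamp, list):
        return [convert_timestamp(t) for t in timestamp]
    timestamp = float(timestamp)
    # Second-resolution Unix times stay below 1e11 until roughly the year
    # 5138, so anything larger is assumed to be milliseconds.
    if timestamp >= 1e11:
        timestamp /= 1000.0
    return datetime.fromtimestamp(timestamp)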
Example #10
    def add_json_data(self, json_event):
        """ Parse and add backbone.js json data for a event """

        self.event_type = json_event['eventtype']
        self.value = str(json_event['value'])

        self.interval = json_event['interval']
        self.timestamp = convert_timestamp(json_event['timestamp'])

        current_app.logger.info("%s added to EventData for session id %s " %
                                (self.ev_id, self.session_id))
Example #11
def process_station(conn, station):
    cur = conn.cursor()

    station_id = db_manager.get_station_id(cur, station)
    info = mvv_reader.departure_information(station_id)

    current_time = datetime.now()

    for departure in only_sbahn(info['departures']):
        departure['station'] = station_id
        dep_db = db_manager.get_departure(cur, departure['departureId'])

        if not (dep_db and len(dep_db) == 8
                and dep_db[5] == departure['destination']):
            departure['created_at'] = departure[
                'updated_at'] = current_time.isoformat()
            db_manager.insert_departure(cur, departure)
        else:
            (created_at, updated_at, _, delay, _, destination, platform,
             cancelled) = dep_db
            fields = {}
            if delay != departure['delay']:
                fields['delay'] = departure['delay']
            if platform != departure['platform']:
                fields['platform'] = departure['platform']
            if cancelled != departure['cancelled']:
                fields['cancelled'] = departure['cancelled']

            delayed_arrival = convert_timestamp(
                departure['departureTime']) + timedelta(
                    minutes=departure['delay'])
            if current_time > delayed_arrival:
                fields['delay'] = int(
                    abs(current_time - delayed_arrival).seconds / 60)

            if fields:
                fields['updated_at'] = current_time.isoformat()
                db_manager.update_departure(cur, departure['departureId'],
                                            fields)

    try:
        conn.commit()
        # print(f'🆗 {station}')
    except Exception as e:
        conn.rollback()
        logger.error(str(e))
        print(f'🚫 {station}')
Example #12
    def add_json_data(self, json_trial):
        """ Parse and add backbone.js json data for a trial """
        self.trial_num = json_trial['current_trial']

        # Parse nested JSON data to extract, acc, RT
        trial_data = json_trial['trialdata']

        self.response = trial_data['resp']

        # TODO: Add specific stimuli. Requires editing JSON
        self.question = "null"
        self.answer = "null"
        self.user_answer = "null"

        # Special case for accuracy
        if trial_data['acc'] == "FORWARD":
            self.accuracy = 11
        elif trial_data['acc'] == "BACK":
            self.accuracy = 22
        elif trial_data['acc'] == "NA":
            self.accuracy = 99
        else:
            self.accuracy = trial_data['acc']

        # Special case for reaction time
        if trial_data['rt'] == "NA":
            self.reaction_time = 0
        else:
            self.reaction_time = trial_data['rt']

        # Datetime conversion
        jsts = json_trial['dateTime']  # JavaScript timestamp
        self.timestamp = convert_timestamp(jsts)

        # Remove invalid characters from block name (e.g. "\n")
        self.block = clean_db_string(trial_data['block'])

        current_app.logger.info(
            "%s added to CategorySwitch for session id %s and JSON CS is %s" %
            (self.trial_num, self.session_id, json_trial))
Example #13
def insert_departure(cur, departure):
    cur.execute(
        """
            INSERT INTO departures (
            created_at,
            updated_at,
            station,
            departure_id,
            departure_time,
            passed_before,
            product,
            destination,
            delay,
            platform,
            cancelled
            ) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
        """, (
            departure['created_at'], departure['updated_at'], departure['station'],
            departure['departureId'], convert_timestamp(departure['departureTime']), departure.get('passedBefore'),
            departure['product'] + ':' + departure['label'], departure['destination'], departure['delay'],
            departure['platform'], departure['cancelled']
        )
    )
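A hypothetical departure dict that would satisfy this INSERT; the keys mirror the snippet itself, the values are placeholders, departureTime is assumed to be epoch milliseconds, and cur is a live psycopg-style cursor:

# Placeholder values; keys mirror the INSERT above.
departure = {
    'created_at': '2021-01-06T12:00:00',
    'updated_at': '2021-01-06T12:00:00',
    'station': 42,                   # internal station id
    'departureId': 'abc123',
    'departureTime': 1609934400000,  # assumed epoch milliseconds
    'passedBefore': None,
    'product': 'SBAHN',
    'label': 'S1',
    'destination': 'Ostbahnhof',
    'delay': 0,
    'platform': '2',
    'cancelled': False,
}
insert_departure(cur, departure)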
Example #14
    def add_json_data(self, json_trial):
        """ Parse and add backbone.js json data for a trial """

        self.trial_num = json_trial['current_trial']
        trial_data = json_trial['trialdata']

        if 'rt' not in trial_data:
            self.reaction_time = 0
        else:
            self.reaction_time = float(trial_data['rt'])

        if 'acc' not in trial_data:
            self.accuracy = "null"
        else:
            self.accuracy = trial_data['acc']

        if 'target_words' not in trial_data:
            self.target_words = "null"
        else:
            self.target_words = ",".join(
                trial_data['target_words']
            )  # [u'Mile', u'Cat', u'France']-->u'Mile,Cat,France'
        if 'input_words' not in trial_data:
            self.input_words = "null"
        else:
            self.input_words = ",".join(trial_data['input_words'])

        # Datetime conversion
        self.timestamp = convert_timestamp(json_trial['dateTime'])
        self.block = clean_db_string(trial_data['block'])

        current_app.logger.info(
            "%s added to KeepTrack for session id %s and whole JSON KT  %s " %
            (self.trial_num, self.session_id, json_trial))
Example #15
def get_video_data(channel_url, channel_name, dupe_setting):
    feed = fp.parse(channel_url)
    entries = feed["entries"]
    channels_timestamps = "channels_timestamps.csv"
    # clear any existing queue before start
    queue = []
    # read contents of channels_timestamps.csv, create list object of contents
    ct = open(channels_timestamps, "r")
    ctr = ct.read().split("\n")
    ct.close()
    ctr_line = ""
    channel_found = False
    # check if channel name is found in channels_timestamps.csv
    for line in ctr:
        line_list = line.split(',')
        if channel_name == line_list[0]:
            channel_found = True
            ctr_line = line
            break
    if not channel_found:
        print("new channel added to config: " + channel_name)
    print(
        str(datetime.now().strftime("%m/%d %H:%M:%S")) + " : checking " +
        str(len(entries)) + " in " + channel_name + "          ")
    print("\033[2A")
    # iterate through video entries for channel, parse data into objects for use
    for pos, i in enumerate(reversed(entries)):
        published = i["published"]
        title = i["title"]
        #updated = i["updated"]
        parsed = published
        if ("odysee" in channel_url) or ("bitchute"
                                         in channel_url) or ("podbean"):
            p = i["updated_parsed"]
            parsed = str(p.tm_year) + str(p.tm_mon).zfill(2) + str(
                p.tm_mday).zfill(2) + str(p.tm_hour).zfill(2) + str(
                    p.tm_min).zfill(2) + str(p.tm_sec).zfill(2)
            published_int = int(parsed)

        if "https://youtube" in channel_url:
            published_int = utils.convert_timestamp(published)
            parsed = str(published_int)
        if dupe_setting > 0 and utils.dupe_check(published_int, title,
                                                 dupe_setting):
            #go to next entry if already imported
            continue
        if not channel_found:
            # add the video to the queue
            queue.append(i)
            ctr_line = channel_name + "," + parsed + "," + parsed
            # add the new line to ctr for adding to channels_timestamps later
            ctr.append(ctr_line)
            print("channel not found, adding " + ctr_line)
            channel_found = True
        # if the channel exists in channels_timestamps, update "published" time in the channel line
        else:
            ctr_line_list = ctr_line.split(",")
            line_published_int = int(ctr_line_list[1])
            if published_int > line_published_int:
                # update the timestamp in the line for the channel in channels_timestamps,
                ctr.remove(ctr_line)
                ctr_line = channel_name + "," + parsed + "," + parsed
                ctr.append(ctr_line)
                # and add current videos to queue.
                queue.append(i)
        #normalize typical differences in title text between sites and avoid comma-induced errors in the csv
        title = title.replace(",", ".")
        title = title.replace("&#x27;", "'")
        title = title.replace("&quot;", "'")
        title = title.replace("(video)", "")
        #print("title:"+title)
        with open("videos.log.csv", "a+") as log_file:
            log_file.write(channel_name + "," + parsed + "," + title + "\n")
    # write the new channels and timestamps line to channels_timestamps.csv
    ct = open(channels_timestamps, "w")
    for line in ctr:
        if line != '':
            ct.write(line + "\n")
    ct.close()
    return queue, "en"
Example #16
def main():
    fileTotalManager = base.FileTotalManager('./file_lengths.json')
    # load data into memory
    print('loading csv data into memory...')

    genome_tags = base.load_data(
        '../data/genome-tags.csv', base.tags_adapter,
        fileTotalManager.getFileTotal('genome-tags.csv'))

    movies_info = base.load_data('../data/mlmovies.csv',
                                 base.movie_info_adapter,
                                 fileTotalManager.getFileTotal('mlmovies.csv'))

    ratings_info = base.load_data(
        '../data/mlratings.csv', base.RatingInfo,
        fileTotalManager.getFileTotal('mlratings.csv'))

    tags_info = base.load_data('../data/mltags.csv', base.TagInfo,
                               fileTotalManager.getFileTotal('mltags.csv'))

    print('loading completed!')

    # print(movie_actor[0].keys(), mltags[0].keys(), tags[0].keys(), mlmovies[0].keys(), mlusers[0].keys())
    print('preprocessing data...')

    # conversion
    min_ts, max_ts = base.convert_timestamp(tags_info, 'timestamp')
    # base.convert_timestamp(ratings_info, 'timestamp')
    genome_tags = {k['tagId']: k['tag'] for k in genome_tags}
    # movie_actor_list = base.get_moive_actor_list(movie_actor)
    # genres_movie_list, min_yr, max_yr = base.get_genre_movies_list(movies_info)
    movie_names = {k['movieid']: k['moviename'] for k in movies_info}

    # actor_names = {k['id']: k['name'] for k in actor_info}

    def tfidf_tag_weight(mr, ts):
        return (1.0 / mr) * (ts - min_ts + 1) / (max_ts - min_ts + 1)

    def no_weight(mr, ts):
        return 1

    print('building vectors')
    # actor_tags_vector
    # actors_tags_vector = base.actor_tag_vector(movie_actor, tags_info, no_weight)[1]
    # actors_idf, actors_tfidf_tags_vector = base.actor_tag_vector(movie_actor, tags_info, tfidf_tag_weight)
    # actors_idf = base.idf(actors_tfidf_tags_vector, actors_idf)
    # for actor in actors_tfidf_tags_vector.keys():
    #     actors_tfidf_tags_vector[actor] = base.tf_idf(actors_tfidf_tags_vector[actor], actors_idf, 'tf-idf')

    # movie_tags_vector
    print('Building standard movie-tag vector')
    movies_tags_vector = base.movie_tag_vector(movies_info, tags_info,
                                               no_weight)[1]

    print('\nBuilding tf-idf movie-tag vector')
    movies_idf, movies_tfidf_tags_vector = base.movie_tag_vector(
        movies_info, tags_info, tfidf_tag_weight)
    movies_idf = base.idf(movies_tfidf_tags_vector, movies_idf)
    for i, movie in enumerate(movies_tfidf_tags_vector.keys()):
        movies_tfidf_tags_vector[movie] = base.tf_idf(
            movies_tfidf_tags_vector[movie], movies_idf, 'tf-idf')

    # movie_actors_vector
    # movies_actors_vector = base.movie_actor_vector(movies_info, movie_actor, no_weight)[1]
    # movies_actor_idf, movies_tfidf_actors_vector = base.movie_actor_vector(movies_info, movie_actor, tfidf_actor_weight)
    # movies_actor_idf = base.idf(movies_tfidf_actors_vector, movies_actor_idf)
    # for movie in movies_tfidf_actors_vector.keys():
    #     movies_tfidf_actors_vector[movie] = base.tf_idf(movies_tfidf_actors_vector[movie], movies_actor_idf, 'tf-idf')

    # create actor-actor matrix
    # actor_actor_similarity, actors_list, actors_index = build_actor_actor_matrix(actors_tfidf_tags_vector)

    # create coactor-coactor matrix
    # coactor_coactor_matrix, coactors_list, coactors_index = build_coactor_coactor_matrix(movie_actor)

    # print('building AMY tensor')
    # create Actor-Movie-Year tensor (AMY tensor)
    # actor_movie_year_tensor, amy_tensor_info = build_actor_movie_year_tensor(movie_actor, movies_info)

    print('\nbuilding TMR tensor')
    # create Tag-Movie-Rating tensor (TMR tensor)
    tag_movie_rating, tmr_tensor_info = build_tag_movie_rating_tensor(
        genome_tags.keys(), ratings_info)

    print('creating list')
    # create watched list
    users_watched_movies = base.get_users_watched_movies(
        tags_info, ratings_info)

    # create watched movies info
    # watched_movies_info = base.get_moives_related_info(movies_info, ratings_info, movie_actor)

    print('preprocessing completed!')

    while True:
        command_line = input('query>')
        commands = command_line.split(' ')
        relevance_feedback = None

        if len(commands) > 0 and 'p3_task1' in commands[0]:
            if len(commands) == 3:
                if commands[2] == 'pf':
                    relevance_feedback = gen_prob_feedback_function(
                        movies_tags_vector)
                else:
                    if not (commands[2] == 'PCA' or commands[2] == 'SVD'):
                        help()
                        continue
            elif len(commands) == 4:
                if commands[3] == 'pf':
                    relevance_feedback = gen_prob_feedback_function(
                        movies_tags_vector)
                else:
                    help()
                    continue

            WeightConstants.initialize(movie_names, tags_info, ratings_info)

        if commands[0] == 'p3_task1a' and len(commands) > 2:
            user_id = int(commands[1])

            similarities = recommender_system_using_svd_pca(
                user_id, users_watched_movies, movies_tfidf_tags_vector,
                genome_tags, commands[2])

            print_output_using(user_id, similarities, relevance_feedback)

        elif commands[0] == 'p3_task1b' and len(commands) > 1:
            user_id = int(commands[1])

            similarities = recommender_system_using_lda(
                user_id, users_watched_movies, movies_tags_vector, genome_tags)

            print_output_using(user_id, similarities, relevance_feedback)

        elif commands[0] == 'p3_task1c' and len(commands) > 1:
            user_id = int(commands[1])

            similarities = recommender_system_using_cp(
                user_id, users_watched_movies, movies_tags_vector,
                tag_movie_rating, tmr_tensor_info, genome_tags)

            print_output_using(user_id, similarities, relevance_feedback)

        elif commands[0] == 'p3_task1d' and len(commands) > 1:
            user_id = int(commands[1])

            similarities = recommender_system_using_ppr(
                user_id, users_watched_movies, movies_tfidf_tags_vector)

            print_output_using(user_id, similarities, relevance_feedback)

        elif commands[0] == 'p3_task1e' and len(commands) > 1:
            user_id = int(commands[1])

            similarities = recommender_system_combining_all(
                user_id, users_watched_movies, movies_tfidf_tags_vector,
                movies_tags_vector, tag_movie_rating, tmr_tensor_info,
                genome_tags)

            print_output_using(user_id, similarities, relevance_feedback)

        elif commands[0] == 'p3_task3' and len(commands) == 3:
            lsh_indexing(genome_tags, movie_names, movies_tags_vector,
                         int(commands[1]), int(commands[2]))
        elif commands[0] == 'p3_task5' and len(commands) > 1:
            labelled_movies = {}
            n = int(input("Enter number of labels: "))
            while n > 0:
                label = input("Enter label: ")
                movie_data = input("Enter space separated movies for label " +
                                   label + ": ")
                movies = movie_data.split(" ")
                for i, m in enumerate(movies):
                    movies[i] = int(m)
                labelled_movies[label] = movies
                n -= 1

            if commands[1] == 'NN' and len(commands) > 2:
                recommender_system_for_labeling_movies(
                    movies_info, labelled_movies, genome_tags,
                    movies_tfidf_tags_vector, commands[1], int(commands[2]))
            elif commands[1] == 'SVM' or commands[1] == 'DT':
                recommender_system_for_labeling_movies(
                    movies_info, labelled_movies, genome_tags,
                    movies_tfidf_tags_vector, commands[1], 0)
        elif len(commands) > 1 and (commands[0] == 'reset'
                                    and commands[1] == 'wc'):
            WeightConstants.reset()
            print("WeightConstants data has been purged")
        else:
            help()
Example #17
def update(session_id=None):
    """ Sync backbone model with appropriate database.  """

    current_app.logger.info("PUT /sync route with id: %s" % session_id)
    resp = None

    try:
        session = Session.query.filter_by(session_id=session_id).one()
    except SQLAlchemyError:
        resp = {"status": "bad request"}
        current_app.logger.error("DB error: Unique user not found.")

    # Check JSON validity
    if utils.check_valid_json(request.get_data()):
        valid_json = json.loads(request.get_data())
    else:
        resp = {"status": "bad request"}
        current_app.logger.error("Invalid JSON")

    # Bail out early if the session lookup or JSON validation failed;
    # session/valid_json would otherwise be unbound below.
    if resp is not None:
        return jsonify(**resp)

    current_app.logger.info(
        "Current trial: %s, session id: %s " %
        (valid_json['currenttrial'], valid_json['sessionid']))

    # For each trial, pass to appropriate parser, if not in db
    for json_trial in valid_json['data']:
        if session.exp_name == "category_switch":
            experiment_class = CategorySwitch
        elif session.exp_name == "keep_track":
            experiment_class = KeepTrack

        db_trial, new = db_utils.get_or_create(
            db.session,
            experiment_class,
            token=session.token,
            session_id=session.session_id,
            trial_num=json_trial['current_trial'])

        # If the trial is new, add data
        if new:
            db_trial.add_json_data(json_trial)
            db.session.commit()

    # For each event, pass to parser, if not in db
    for json_event in valid_json['eventdata']:
        db_event, new = db_utils.get_or_create(
            db.session,
            EventData,
            token=session.token,
            session_id=session.session_id,
            exp_name=session.exp_name,
            timestamp=utils.convert_timestamp(json_event['timestamp']))

        if new:
            db_event.add_json_data(json_event)
            db.session.commit()

    if valid_json['questiondata'] != {}:
        # For the QuestionData, pass to parser, if not in db
        db_ques, new = db_utils.get_or_create(db.session,
                                              QuestionData,
                                              token=session.token,
                                              session_id=session.session_id,
                                              exp_name=session.exp_name)
        db_ques.add_json_data(valid_json['questiondata'])
        db.session.commit()

    if resp is None:
        resp = {"status": "success"}

    return jsonify(**resp)
Example #18
def update(session_id=None):
    """ Sync backbone model with appropriate database.  """

    current_app.logger.info("PUT /sync route with id: %s" % session_id)
    resp = None

    try:
        session = Session.query.filter_by(session_id=session_id).one()
    except SQLAlchemyError:
        resp = {"status": "bad request"}
        current_app.logger.error("DB error: Unique user not found.")

    # Check JSON validity
    if utils.check_valid_json(request.get_data()):
        valid_json = json.loads(request.get_data())
    else:
        resp = {"status": "bad request"}
        current_app.logger.error("Invalid JSON")

    # Bail out early if the session lookup or JSON validation failed;
    # session/valid_json would otherwise be unbound below.
    if resp is not None:
        return jsonify(**resp)

    current_app.logger.info(
        "Current trial: %s, session id: %s " %
        (valid_json['currenttrial'], valid_json['sessionid']))

    ## JAKE: This needs to be slightly customized to add your task
    ## However, most of the work will be in models.py
    # For each trial, pass to appropriate parser, if not in db
    for json_trial in valid_json['data']:
        if session.exp_name == "category_switch":
            experiment_class = CategorySwitch
        elif session.exp_name == "keep_track":
            experiment_class = KeepTrack
        elif session.exp_name == 'BART':
            experiment_class = BART
        else:
            # unknown experiment type; skip this trial
            continue

        db_trial, new = db_utils.get_or_create(
            db.session,
            experiment_class,
            gfg_id=session.gfg_id,
            session_id=session.session_id,
            trial_num=json_trial['current_trial'])

        # If the trial is new, add data
        if new:
            db_trial.add_json_data(json_trial)
            db.session.commit()

    ## JAKE: this part is for recording events that dataHandler (i.e. psiTurk),
    ## tracks, automatically
    # For each event, pass to parser, if not in db
    for json_event in valid_json['eventdata']:
        db_event, new = db_utils.get_or_create(
            db.session,
            EventData,
            gfg_id=session.gfg_id,
            session_id=session.session_id,
            exp_name=session.exp_name,
            timestamp=utils.convert_timestamp(json_event['timestamp']))

        if new:
            db_event.add_json_data(json_event)
            db.session.commit()

    ## JAKE: Don't worry about this now, you won't have any question data
    if valid_json['questiondata'] != {}:
        # For the QuestionData, pass to parser, if not in db
        db_ques, new = db_utils.get_or_create(db.session,
                                              QuestionData,
                                              gfg_id=session.gfg_id,
                                              session_id=session.session_id,
                                              exp_name=session.exp_name)
        db_ques.add_json_data(valid_json['questiondata'])
        db.session.commit()

    if resp is None:
        resp = {"status": "success"}

    return jsonify(**resp)
Example #19
def get_frame_from_log(username, password, ip_address, port, channel, stream,
                        path_save_frame, log_path):
    """
    input:
        username, password: username and password to login to camera.
        ip_address, port, channel, stream: Addition info to get from camera. This is specific for our camera.
        path_save_frame: Path to save all got frames, number frame coressponding log files.
        log_path: Path to log files.
    output:
        None
    """
    # read all lines and sort by time
    line_object, _ = read_log_file(log_path)
    timestamp_frame_start = line_object[0]['time']
    
    datetime_objects = convert_timestamp(int((timestamp_frame_start)/1000)).split("_")
    starting_year, starting_month, starting_day = datetime_objects[0], datetime_objects[1], datetime_objects[2]
    starting_hour, starting_minute, starting_second = datetime_objects[3], datetime_objects[4], datetime_objects[5]
    
    starting_second = int(float(starting_second))
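    # Assumption: the camera expects starttime in UTC while the log
    # timestamps are local time (UTC+7), hence the "- 7" on the hour below.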
    uri = f"rtsp://{username}:{password}@{ip_address}:{port}/Streaming/tracks/{channel}{stream}?starttime={format(int(starting_year), '04d')}{format(int(starting_month), '02d')}{format(int(starting_day), '02d')}T{format(int(starting_hour) - 7, '02d')}{format(int(starting_minute), '02d')}{str(format(int(starting_second), '02d'))}z"
    # uri = "rtsp://*****:*****@192.168.10.75:554/Streaming/tracks/101?starttime=20201111T111111z"
    print(uri)
    

    #video path
    cap = cv2.VideoCapture(uri)
    # Find OpenCV version
    (major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')
     
    # With webcam get(CV_CAP_PROP_FPS) does not work.
    # Let's see for ourselves.
    fps = 0
    if int(major_ver) < 3:
        fps = cap.get(cv2.cv.CV_CAP_PROP_FPS)
        print("Frames per second using video.get(cv2.cv.CV_CAP_PROP_FPS): {0}".format(fps))
    else:
        fps = cap.get(cv2.CAP_PROP_FPS)
        print("Frames per second using video.get(cv2.CAP_PROP_FPS) : {0}".format(fps))
    count_frame = 0
    count_saved_frame = 1

    Path(path_save_frame).mkdir(parents=True, exist_ok=True)
    start_frame = 0
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        else:
            if start_frame < 30:
                start_frame += 1
                continue
            # cv2.imshow('frame', frame)
            #The received "frame" will be saved. Or you can manipulate "frame" as per your needs.
            timestamp_frame = timestamp_frame_start + (count_frame * (1000/fps))
            print(f"[INFO]-{timestamp_frame}-|-{line_object[0]['time']}-|-{len(line_object)}-----", end="\r") 
            
            name = f"rec_frame_{format(int(starting_year), '04d')}{format(int(starting_month), '02d')}{format(int(starting_day), '02d')}T{format(int(starting_hour), '02d')}{format(int(starting_minute), '02d')}{starting_second + count_frame * (1.0/fps)}_{timestamp_frame}.jpg"
            name = f"{count_saved_frame:04d}.jpg"
            # 1609900972.982348 | 
            # 1609901392.553
            # 1609913255.610
            # 1609901673
            # print(timestamp_frame, "-", line_object[0]['time'])
            if abs(timestamp_frame - line_object[0]['time']) < (1000 / (fps)):
                cv2.imwrite(os.path.join(path_save_frame,name), frame)
                count_saved_frame += 1
                del line_object[0]
                if len(line_object) == 0:
                    break
            count_frame += 1
        # if cv2.waitKey(20) & 0xFF == ord('q'):
        #     break
    cap.release()
    cv2.destroyAllWindows()