Example #1
        writer = csv.DictWriter(f, fieldnames=fields)
        writer.writeheader()
        for item in result:
            writer.writerow(item)


if __name__ == '__main__':
    location = ['F:\\']
    extensions = EXTENSIONS

    # set up sqlalchemy database connection
    engine = create_engine('sqlite:///{}'.format(DATABASE))
    Base.metadata.bind = engine
    session = scoped_session(sessionmaker(bind=engine))

    klist = file_details(location, extensions)

    csv_output(klist, csv_file='file_result.csv')

    for movie in klist:
        try:
            attribs = session.query(MovieList).filter(
                and_(MovieList.Title == movie['title'], MovieList.Size == movie['size'])).first()

            if attribs.imdb_id:
                movie['imdb_id'] = attribs.imdb_id
            else:
                movie['imdb_id'] = None
            if attribs.Runtime:
                movie['runtime'] = attribs.Runtime
            else:
                movie['runtime'] = None
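
The query in this example assumes a MovieList declarative model that is not part of the snippet. Below is a minimal sketch of such a model; the column names (Title, Size, imdb_id, Runtime) come from the attribute accesses above, while the table name, types, and primary key are assumptions.

# Hypothetical sketch of the MovieList model queried in Example #1.
# Column names come from the attribute accesses above; the table name,
# column types, and primary key are assumptions.
from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()


class MovieList(Base):
    __tablename__ = 'movie_list'  # assumed table name

    id = Column(Integer, primary_key=True)    # assumed surrogate key
    Title = Column(String)                    # matched against movie['title']
    Size = Column(Integer)                    # matched against movie['size']
    imdb_id = Column(String, nullable=True)   # copied into movie['imdb_id']
    Runtime = Column(String, nullable=True)   # copied into movie['runtime']

Matching on both Title and Size lets the loop pair a file on disk with a previously stored record even when two files share a title.
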
Example #2
            if remain is not None and remain == '0':
                endtime = time.time()
                delta = endtime - starttime
                if delta < 10:
                    sleep = 11 - delta
                    print('0 requests left.  Sleeping for {} seconds.'.format(sleep))
                    time.sleep(sleep)
        finally:
            title = movie['title']
            req_resp[title] = search_result
            if remain is not None and remain == '0':
                endtime = time.time()
                if starttime is not None:
                    delta = endtime - starttime
                else:
                    delta = 0
                if delta < 10:
                    sleep = 11 - delta
                    print('0 requests left.  Sleeping for {} seconds.'.format(sleep))
                    time.sleep(sleep)

    # return {request_list, req_resp if response else request_list}
    return request_list

if __name__ == '__main__':
    enable_cache(expire=864000)
    movies = file_details(SEARCHDIRS, EXTENSIONS)
    mlist, stime = search_movie_with_year(movies, APIKEY, HEADERS, APIURL)
    final_list = append_response(mlist, APIKEY, HEADERS, APIURL, stime)
    upsert_db(final_list, DATABASE)
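
Example #2 throttles API calls: when the service reports that 0 requests remain, it sleeps out the rest of an roughly 10-second window, and the same logic appears both in the request loop and in the finally clause. Below is a hedged sketch that consolidates that logic into one helper; the function name and signature are assumptions, while the window, the string comparison, and the message come from the snippet.

# Hypothetical helper consolidating the throttling logic from Example #2.
import time


def throttle_if_exhausted(remain, starttime, window=10):
    """Sleep out the rest of the rate-limit window when no requests are left."""
    # remain is the "requests remaining" value reported by the API (a string here)
    if remain is None or remain != '0':
        return
    # delta: seconds elapsed since the window started; 0 if starttime is unknown
    delta = time.time() - starttime if starttime is not None else 0
    if delta < window:
        sleep = window + 1 - delta
        print('0 requests left.  Sleeping for {} seconds.'.format(sleep))
        time.sleep(sleep)

The enable_cache(expire=864000) call in the driver looks like a project-local wrapper around an HTTP response cache; 864000 seconds is ten days.
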