def search(self, s):
    """Search OMDb for films matching *s* and collect results from all pages.

    Builds the query from self.omdb and self.api_key, asks self.pagination()
    how many result pages exist, then fetches each page and converts every
    hit into a Film. Returns the list of Film objects.
    """
    all_films = []
    # e.g. http://www.omdbapi.com/?s=godfather&apikey=XXXX&page=1
    omdb_query = self.omdb + "s=" + s + "&apikey=" + self.api_key
    pages = self.pagination(omdb_query)
    print("pages: " + str(pages))
    for page in range(1, pages + 1):
        omdb_query_page = omdb_query + "&page=" + str(page)
        response = requests.get(omdb_query_page)
        jsoned_response = json.loads(response.text)
        print("omdb_query_page: " + omdb_query_page)
        search_response = jsoned_response.get("Search")
        if not search_response:
            # Error payloads (bad key, no results) have no "Search" key;
            # the old code would crash on len(None) here.
            continue
        # BUG FIX: the old loop was range(1, len(search_response)) and
        # silently dropped the first result of every page.
        for entry in search_response:
            film = Film(entry.get("Title"), entry.get("Year"),
                        entry.get("imdbID"), entry.get("Type"))
            film.print_film()
            all_films.append(film)
    return all_films
def _networkM3U8(self, url: str) -> None:
    """Resolve a film/series page URL to its m3u8 stream and start a download.

    Strips the scheme/www prefix, extracts the title slug from supported
    domains, then handles the three Film.init states: fresh download,
    resume, or name collision (index the new file or replace the old one).
    """
    url = url.replace("www.", "").removeprefix("https://")
    # BUG FIX: the original pattern wrapped the domain alternatives in
    # [...] — a character class that matches SINGLE characters — instead of
    # a group, so it matched far more URLs than intended.
    filmName = re.findall(
        r"(?:altadefinizione\..*|altadefinizionecommunity\.net)\/(.*/)", url)
    # NOTE(review): serieName is computed but never used below — confirm
    # whether series handling was meant to be added here.
    serieName = re.findall(r"seriehd\..*\/(.*/)", url)
    if filmName != []:
        out = self.output()
        self.root.destroy()
        film = Film(filmName[0], out)
        if film.init[0] == 'created':
            # First download of this title.
            Downloader(film.outputFile)
        elif film.init[0] == "continuare":
            # A partial download exists; resume it.
            messagebox.showinfo("4K Downloader",
                                "Download già iniziato, verrà ripreso")
            Downloader(film.outputFile)
        elif film.init[0] == "sostituire":
            # Same name, different file: ask whether to index or replace.
            s = messagebox.askyesno(
                "4K Downloader",
                "Nome già esistente ma i file sono diversi.\nSi -> Indicizza file\nNo -> Sostituisci file"
            )
            if s:
                film.outputFile = film.outputFile.duplicate()
            else:
                film.outputFile.temp().remove()
                Data.delete(film.outputFile.name)
                Film.initialize(film.outputFile, film.m3u8Path)
            Downloader(film.outputFile)
def load(self):
    """Load directors and films from their pickle files.

    On any failure (missing files, corrupt pickles, ...) a small default
    database is seeded instead, and a message is printed either way.
    """
    try:
        with open(self.directors_file, "rb") as fh:
            self.directors = pickle.load(fh)
        with open(self.films_file, "rb") as fh:
            self.films = pickle.load(fh)
    except Exception as ex:
        print("Can't load database, error: ", ex.args)
        # Seed a minimal default dataset so the app remains usable.
        default_directors = (
            Director(0, "Ridley Scott", "79", "United Kingdom"),
            Director(1, "Alexey Kiryushenko", "53", "Ukraine"),
            Director(2, "Guy Ritchie", "49", "United Kingdom"),
        )
        for director in default_directors:
            self.directors.add(director)
        default_films = (
            Film(0, 0, "Gladiator", "USA", "2000"),
            Film(0, 1, "Alien", "USA", "1979"),
            Film(1, 2, "Sluga Naroda", "Ukraine", "2015"),
            Film(2, 3, "Sherlock Holmes", "USA", "2009"),
            Film(2, 4, "Lock, Stock and Two Smoking Barrels",
                 "United Kingdom", "1998"),
        )
        for film in default_films:
            self.films.add(film, self.directors)
        print("Default database has been created successful!")
def get_watchlist_films_for_user(self, user_id, bypass_data_store=False):
    """Scrape every page of a user's watchlist and return {film_id: name}.

    Args:
        user_id: the site user whose watchlist is scraped.
        bypass_data_store: when True, films are NOT persisted to self.ds.

    Returns:
        dict mapping film slug/id to film name.
    """
    page = self._fetch_url(self._build_user_watchlist_url(user_id))
    soup = BeautifulSoup(page, 'html.parser')
    # Last pagination link holds the total page count.
    num_pages = int(
        soup.find_all('li', class_="paginate-page")[-1].find('a').text)
    films_index = {}
    # TODO async await
    for page_num in range(num_pages):
        url = self._build_user_watchlist_url(user_id)
        if page_num != 0:
            url += f"/page/{page_num + 1}/"
        page = self._fetch_url(url)
        soup = BeautifulSoup(page, 'html.parser')
        for film_div in soup.find_all('div', class_="film-poster"):
            new_film = Film({
                'id': film_div.get('data-film-slug'),
                'name': film_div.find('img').get('alt'),
            })
            # BUG FIX: bypass_data_store was accepted but ignored — every
            # film was saved unconditionally. (Assumes the flag means
            # "skip persistence" — confirm against callers.)
            if not bypass_data_store:
                new_film.save(self.ds)
            films_index[new_film.id] = new_film.name
    return films_index
def getFilmData(entrys):
    """Split raw listing lines into per-film chunks and parse each one.

    A chunk starts at a line containing "lister-item mode-advanced" and runs
    until the next such marker (or end of input). Returns a list of parsed
    Film objects; empty when no marker is present.
    """
    res = []
    last = -1
    for i, line in enumerate(entrys):
        if "lister-item mode-advanced" in line:
            # Start of a new film entry: flush the previous chunk.
            if last != -1:
                res.append(Film.parse(entrys[last:i]))
            last = i
    # BUG FIX: when no marker was ever found the old code still appended
    # Film.parse(entrys[-1:]) because last stayed -1.
    if last != -1:
        res.append(Film.parse(entrys[last:]))
    return res
def add_movie(self):
    """Add the movie typed in the title field to the list widget.

    Returns:
        True when the movie was added; False when the input field is
        empty or Film.add_to_movies() reports failure.
    """
    movie_title = self.le_movie_title.text()
    if not movie_title:
        return False
    movie = Film(movie_title)
    # BUG FIX: the original returned None on both success and failure;
    # return an explicit bool in every path.
    if not movie.add_to_movies():
        return False
    list_item = QtWidgets.QListWidgetItem(movie.title)
    # Keep the Film object attached to the list row for later retrieval.
    list_item.setData(QtCore.Qt.UserRole, movie)
    self.list_movies.addItem(list_item)
    self.le_movie_title.setText("")
    return True
def __init__(self, film_count, hall_count):
    """Create a cinema with the requested numbers of films and halls.

    Films and halls are generated (with presumably random names) and
    de-duplicated by name until the requested counts are reached.
    NOTE(review): this loops forever if fewer than film_count/hall_count
    distinct names can be generated — confirm the generators' name pools.
    """
    self.films_list = []
    self.halls_list = []
    used_film_names = []
    used_hall_names = []
    while len(self.films_list) != film_count:
        candidate = Film()
        if candidate.name not in used_film_names:
            self.films_list.append(candidate)
            used_film_names.append(candidate.name)
    while len(self.halls_list) != hall_count:
        candidate = CinemaHall()
        if candidate.name not in used_hall_names:
            self.halls_list.append(candidate)
            used_hall_names.append(candidate.name)
    # Closing time of the cinema: 02:00 on the following day.
    self.close_time = datetime.datetime.combine(
        datetime.date.today(), datetime.time(2, 0)) + datetime.timedelta(hours=24)
    # One queue of sessions per hall.
    self.queues = [deque() for _ in range(hall_count)]
    self.create_schedule()
def test_showtime_generator__zero_hour_zero_minute(self):
    """A zero-length run time must be rejected at Film construction."""
    # FIX: removed the unused `date` local — validation happens inside Film,
    # before any ShowtimeGenerator is involved.
    with self.assertRaises(FilmValidationException):
        Film(movie_title="Something About Mary",
             release_year="1992",
             mpaa_rating="R",
             run_time="0:00")
def test_showtime_generator__weekend__one_hour_forty_eight_minutes(self):
    """A 1h48m film scheduled on 2018-12-14 yields five showtimes."""
    show_date = datetime(year=2018, month=12, day=14)
    film = Film(movie_title="Something About Mary",
                release_year="1992",
                mpaa_rating="R",
                run_time="1:48")
    generated = ShowtimeGenerator(show_date, film).run()
    self.assertEqual(len(generated), 5)
def test_showtime_generator__twenty_hour(self):
    """A 20-hour film cannot be scheduled at all: zero showtimes."""
    show_date = datetime(year=2018, month=12, day=14)
    film = Film(movie_title="Something About Mary",
                release_year="1992",
                mpaa_rating="R",
                run_time="20:00")
    generated = ShowtimeGenerator(show_date, film).run()
    self.assertEqual(len(generated), 0)
def get_film(self, row):
    """Build a Film (name, id, link) from one search-results row.

    The row is re-serialised and parsed with lxml to normalise the markup
    before extracting the anchor carrying class 'all'.
    """
    row_soup = BeautifulSoup(row.prettify(), 'lxml')
    anchor = row_soup.find(attrs={'class': 'all'})
    # The film id is the third path segment of the href.
    film_id = anchor['href'].split('/')[2]
    full_link = 'https://www.kinopoisk.ru' + anchor['href']
    return Film(anchor.get_text(), film_id, full_link)
def _localM3U8(self, _name) -> None:
    """Start a download of the local m3u8 playlist into '<_name>.mp4'.

    Does nothing unless both the output name and self.m3u8Path are set.
    """
    if _name == '' or self.m3u8Path == '':
        return
    target = self.output().add(f'{_name}.mp4')
    status = Film.initialize(target, PathModel(self.m3u8Path))
    if status == "created":
        self.root.destroy()
        Downloader(target)
    else:
        # Any other status is reported rather than downloaded.
        print(status)
class Scene:
    """Owns the render pipeline: film (output), sampler, tracer and camera."""

    def __init__(self):
        self.film = Film()
        self.sampler = TileSampler()
        self.ray_tracer = RayTracer()
        self.camera = Camera()

    def render(self):
        """Trace one ray per sample and accumulate the colours on the film."""
        height, width = self.film.image.shape[:2]
        for sample in self.sampler.get_sample():
            ray = self.camera.generate_ray(sample, width, height)
            self.film.commit(sample, self.ray_tracer.trace(ray, 0))
            # Progress report every 50th row.
            if int(sample[1]) % 50 == 0:
                print('%s: (%d, %d)' % (self.film.filename, sample[0],
                                        sample[1]))
        self.film.write_image()
def downloadAndSaveFilmInfo():
    """Fetch and persist info for every known film URL not yet downloaded."""
    conn = db.getConn()
    # FIX: renamed `all` -> `all_urls`; the old name shadowed the builtin
    # all(). (Single-arg print() is valid in both Python 2 and 3.)
    all_urls = db.getAllFilmsUrls(conn)
    downloaded = db.getDownloadedFilmsUrls(conn)
    print(len(all_urls))
    print(len(downloaded))
    try:
        # Only fetch pages we have not stored yet (set difference).
        for url in all_urls - downloaded:
            soup = filmweb.getFilmSoup(url)
            db.saveFilmInfo(conn, Film(url, soup))
    finally:
        # FIX: close the connection even when a fetch/save raises.
        conn.close()
def main():
    """Read films from the file named on the command line and print each
    film followed by its generated showtimes.

    The input is comma-space separated, one film per line, with a header
    row that is skipped.
    """
    date = datetime.now()
    header = True
    with open(argv[1], 'r') as infile:
        for line in infile:
            if header:
                header = False
                continue
            # BUG FIX: strip the trailing newline so the last field of each
            # row no longer carries a '\n' into the Film constructor.
            film = Film(*line.rstrip('\n').split(', '))
            showtimes = ShowtimeGenerator(date, film).run()
            print(film)
            # Plain loop instead of a side-effect list comprehension.
            for showtime in showtimes:
                print(showtime)
def get_film(db, main_film_id):
    """Load a fully-populated Film instance for *main_film_id* from *db*.

    Returns None when the film has no feature vectors in the database.
    """
    film = Film(main_film_id)
    vectors = film.build_vectors(db)
    if vectors is None:
        return None
    film.set_vectors(vectors)
    film.set_metadata(db)
    return film
def add_film(self):
    """Interactively prompt for a film's details and add it to the database.

    Rejects a non-numeric director id; any database error is printed.
    """
    did = input("Director's id: ")
    fname = input("Films's name: ")
    fcountry = input("Films's country: ")
    fdate = input("Films's year: ")
    if not did.isdecimal():
        print("Director's Id isn't right")
        return
    try:
        new_film = Film(int(did), self.db.films.generate_id(),
                        fname, fcountry, fdate)
        self.db.films.add(new_film, self.db.directors)
    except Exception as ex:
        print(ex.args)
def filter_by_borough(borough):
    """List films shot at locations inside the given borough."""
    if app.debug:
        print(request.args)
    # Parameterised query: borough is bound, not interpolated.
    qry = """SELECT * FROM Film INNER JOIN Filmmaker ON Film.filmmaker_imdblink = Filmmaker.imdblink INNER JOIN FilmingLocations ON Film.imdblink = FilmingLocations.film_imdblink INNER JOIN NYCLocation ON (FilmingLocations.latitude = NYCLocation.latitude AND FilmingLocations.longitude = NYCLocation.longitude) WHERE NYCLocation.borough = :borough_str;"""
    cursor = g.conn.execute(text(qry), borough_str=borough)
    films = [Film(row) for row in cursor]
    cursor.close()
    cache['films'] = films
    return render_template("index.html", **cache)
def index():
    """Render the landing page, serving cached films when available."""
    if app.debug:
        print(request.args)
    # Serve from cache when a previous filter populated it.
    if 'films' in cache and len(cache['films']) > 0:
        return render_template("index.html", **cache)
    # Default to list 30 films.
    cursor = g.conn.execute("""SELECT * FROM Film INNER JOIN Filmmaker ON Film.filmmaker_imdblink = Filmmaker.imdblink INNER JOIN FilmingLocations ON Film.imdblink = FilmingLocations.film_imdblink INNER JOIN NYCLocation ON (FilmingLocations.latitude = NYCLocation.latitude AND FilmingLocations.longitude = NYCLocation.longitude) LIMIT 30;""")
    films = [Film(row) for row in cursor]
    cursor.close()
    cache['films'] = films
    return render_template("index.html", **cache)
def filter_by_location():
    """Filter films by a case-insensitive substring of the shoot address."""
    if len(request.form['location']) < 1:
        return render_template("index.html", **cache)
    if app.debug:
        print(request.args)
    # Wrap the lowered search term for a SQL LIKE match.
    location = '%' + request.form['location'].lower() + '%'
    if app.debug:
        print(location)
    qry = """SELECT * FROM Film INNER JOIN Filmmaker ON Film.filmmaker_imdblink = Filmmaker.imdblink INNER JOIN FilmingLocations ON Film.imdblink = FilmingLocations.film_imdblink INNER JOIN NYCLocation ON (FilmingLocations.latitude = NYCLocation.latitude AND FilmingLocations.longitude = NYCLocation.longitude) WHERE LOWER(NYCLocation.address) LIKE :location_searchstring;"""
    cursor = g.conn.execute(text(qry), location_searchstring=location)
    films = [Film(row) for row in cursor]
    cursor.close()
    cache['films'] = films
    return render_template("index.html", **cache)
def create_schedule(self):
    """Fill every hall's queue with screening sessions until closing time.

    Halls are served round-robin by earliest availability; films cycle in
    list order. Scheduling stops (returns self) once no film fits before
    self.close_time, allowing 45 minutes of slack after each screening.
    """
    next_film_number = 0
    next_hall_number = 0
    tmp_film = Film()
    films_list_len = len(self.films_list)
    halls_list_len = len(self.halls_list)
    if films_list_len == 0 or halls_list_len == 0:
        raise Exception(
            'Ошибка формирования расписания: список фильмов и(или) список залов пуст'
        )
    # Fill the schedule with sessions until we hit 02:00 of the next day.
    while True:
        # Pick the hall whose previous film ends earliest.
        # NOTE(review): halls sharing the same end time collide on the dict
        # key — only one of them is kept. Confirm this is acceptable.
        tmp_dict = dict([(self.halls_list[l].last_film_end_time, l)
                         for l in range(halls_list_len)
                         ])  # { datetime: i, ...}
        next_hall_number = tmp_dict[min(
            tmp_dict.keys())]  # d[min(datetime)] - i
        # Take the next film, cycling back to the start of the list.
        tmp_film = self.films_list[next_film_number]
        next_film_number += 1
        if next_film_number == films_list_len:
            next_film_number = 0
        # If this film (plus 45 min turnaround) would run past closing,
        # look for any film that still fits.
        if self.halls_list[next_hall_number].last_film_end_time +\
                datetime.timedelta(minutes=tmp_film.duration)\
                + datetime.timedelta(minutes=45) > self.close_time:
            for k in range(films_list_len):
                # NOTE(review): this loop returns on the FIRST film that
                # does not fit and keeps overwriting tmp_film (no break)
                # for those that do — so the result depends on list order
                # and scheduling may stop while shorter films still fit.
                # Confirm whether that is the intended behaviour.
                if self.halls_list[next_hall_number].last_film_end_time +\
                        datetime.timedelta(minutes=self.films_list[k].duration) +\
                        datetime.timedelta(minutes=45) < self.close_time:
                    tmp_film = self.films_list[k]
                else:
                    return self
        # Create the session and queue it for this hall.
        tmp_session = Session(self.halls_list[next_hall_number], tmp_film)
        self.queues[next_hall_number].append(tmp_session)
        # Advance the hall's availability to the session's end time.
        self.halls_list[
            next_hall_number].last_film_end_time = tmp_session.end_time
def parser():
    """Parse ./peliculas100.dat (one '|'-separated film per line).

    Returns a list of Film instances; an empty list when the file is
    missing. (Kept compatible with both Python 2 and 3.)
    """
    films = []
    try:
        # FIX: the original opened the file, rebound the variable and never
        # closed the handle; a with-block releases it deterministically.
        with open("./peliculas100.dat") as fh:
            raw = fh.readlines()
        # Trick kept from the original: pre-size instead of append.
        films = [0] * len(raw)
        # FIX: the loop variable `id` shadowed the builtin; xrange and
        # string.split replaced by their portable equivalents.
        for idx in range(len(raw)):
            # field_parser normalises each '|'-separated field.
            films[idx] = Film()
            films[idx].fromlist(map(field_parser, raw[idx].split("|")))
    except IOError:
        print("[!] peliculas100.dat not found")
    return films
from flask import Flask, request, jsonify
from flask_cors import CORS
from film import Film

app = Flask(__name__)
# BUG FIX: the keyword was misspelled "resoures", so Flask-CORS silently
# ignored it and the /film* resource pattern was never applied.
CORS(app, resources={r'/film*': {'origins': '*'}})
film_model = Film()


@app.route('/film')
def film():
    """Look up a film by the ?name= query parameter.

    Returns JSON for a hit, "empty" for a blank name, 404 otherwise.
    """
    name = request.args.get('name')  # None when the key is absent
    print('name parsed = ', name)
    if name == "":
        return "empty", 200
    result = film_model.find_film(str(name))
    if result is not None:
        print('result is not none and is = \n', result)
        return jsonify(result)
    print('result = none')
    return "Film not found", 404


if __name__ == '__main__':
    app.run(debug=False)
def create_film_documents():
    """Index every Film entity into the 'films' search index."""
    for film in Film.query().iter():
        document = ModelSearch.create_film_document(film.key.urlsafe(), film)
        ModelSearch.add_document(document, index_name='films')
type = input('Enter the type of film: ') log = 'Entered type ' + type f.write(log + '\n') studios = input('Enter the studios name: ') log = 'Entered the studios ' + studios f.write(log + '\n') try: no_item = int(input('Enter the number of copies of the book: ')) log = 'Entered the no if items' + str(no_item) f.write(log + '\n') except ValueError: print('Sorry, enter an integer') items_all['film'][title] = Film(title, genre, price, year, type, studios, no_item) log = 'Added a film' + str(choose_type_add) f.write(log + '\n') print('Film added!') print() else: print('Invalid Input! Please try again!!! ') print(" Always! You must enter valid integer between 1, 2 and 3") print() try: other_add = int(input("""add another item? 1. yes 2. no\n"""))
# Seed data: cinemas, films and the screening sessions that link them.
# (FIX: removed the block of commented-out experimental seeding — dead code.)

# Cinema
cinema1 = Cinema("Kiev", "Velyka Vasylkivska Street, 19")
cinema2 = Cinema("October", "Konstantinovskaya Street, 26")
cinema3 = Cinema("Torch", "Mykola Bazhana Avenue, 3")

# Film
film1 = Film("InterStellar", 2014, "Fantastic", 168)
film2 = Film("Joker", 2019, "Drama", 116)
film3 = Film("Gentlemen", 2019, "Criminal", 113)

# Session: one film shown in one named room on a given date.
session1 = Session("2020-09-17", "Almandine", film1)
session1.cinemas = [cinema1]
session2 = Session("2020-09-17", "Ultramarine", film2)
session2.cinemas = [cinema2]
session3 = Session("2020-09-18", "Terracotta", film3)
session3.cinemas = [cinema3]

# 9 - persists data
session.add(cinema1)
session.add(cinema2)
def get_config():
    """Parse command-line options, build the selected model and loaders.

    Returns:
        (args, model, train_loader, test_loader)

    Raises:
        ValueError: when --model names a model this function cannot build.
    """
    parser = argparse.ArgumentParser(description='parser')
    parser.add_argument('--project', type=str, default='vqa')
    parser.add_argument(
        '--model', type=str,
        choices=['basern', 'rn', 'sarn', 'san', 'mrn', 'mlb', 'film'])
    # Data options
    data_arg = parser.add_argument_group('Data')
    data_arg.add_argument('--data-directory', type=str,
                          default=os.path.join(home, 'data'),
                          help='directory of data')
    data_arg.add_argument('--dataset', type=str)
    data_arg.add_argument('--input-h', type=int)
    data_arg.add_argument('--input-w', type=int)
    data_arg.add_argument('--top-k', type=int)
    data_arg.add_argument('--multi-label', action='store_true')
    data_arg.add_argument('--tokenizer', type=str, default=None)
    data_arg.add_argument('--text-max', type=int)
    # Training options
    train_arg = parser.add_argument_group('Train')
    train_arg.add_argument('--batch-size', type=int)
    train_arg.add_argument('--epochs', type=int)
    train_arg.add_argument('--lr', type=float)
    train_arg.add_argument('--lr-reduce', action='store_true')
    train_arg.add_argument('--weight-decay', type=float)
    train_arg.add_argument('--gradient-clipping', type=float)
    train_arg.add_argument('--log-directory', type=str,
                           default=os.path.join(home, 'experiment'),
                           metavar='N', help='log directory')
    train_arg.add_argument('--device', type=int, default=0, metavar='N',
                           help='gpu number')
    train_arg.add_argument('--cpu-num', type=int, default=8, metavar='N',
                           help='number of cpu')
    train_arg.add_argument('--multi-gpu', action='store_true')
    train_arg.add_argument('--gpu-num', type=int, default=4, metavar='N',
                           help='number of cpu')
    train_arg.add_argument('--seed', type=int, default=1, metavar='S',
                           help='random seed (default: 1)')
    train_arg.add_argument(
        '--log-interval', type=int, default=10, metavar='N',
        help='how many batches to wait before logging training status')
    train_arg.add_argument(
        '--timestamp', type=str,
        default=datetime.datetime.now().strftime("%y%m%d%H%M%S"),
        metavar='N', help='time of the run(no modify)')
    train_arg.add_argument('--memo', type=str, default='default', metavar='N',
                           help='memo of the model')
    train_arg.add_argument('--load-model', type=str, default=None,
                           help='load previous model')
    model_arg = parser.add_argument_group('Model')
    # Convolution
    model_arg.add_argument('--cv-pretrained', action='store_true')
    model_arg.add_argument('--cv-filter', type=int)
    model_arg.add_argument('--cv-kernel', type=int)
    model_arg.add_argument('--cv-stride', type=int)
    model_arg.add_argument('--cv-layer', type=int)
    model_arg.add_argument('--cv-batchnorm', action='store_true')
    # Text Encoder
    model_arg.add_argument('--te-pretrained', action='store_true')
    model_arg.add_argument('--te-type', type=str, choices=['gru', 'lstm'])
    model_arg.add_argument('--te-embedding', type=int)
    model_arg.add_argument('--te-hidden', type=int)
    model_arg.add_argument('--te-layer', type=int)
    model_arg.add_argument('--te-dropout', type=float)
    # film
    model_arg.add_argument('--film-res-kernel', type=int)
    model_arg.add_argument('--film-res-layer', type=int)
    model_arg.add_argument('--film-cf-filter', type=int)
    model_arg.add_argument('--film-fc-hidden', type=int)
    model_arg.add_argument('--film-fc-layer', type=int)
    # san
    model_arg.add_argument('--san-layer', type=int)
    model_arg.add_argument('--san-k', type=int)
    # basern
    model_arg.add_argument('--basern-gt-hidden', type=int)
    model_arg.add_argument('--basern-gt-layer', type=int)
    model_arg.add_argument('--basern-fp-hidden', type=int)
    model_arg.add_argument('--basern-fp-layer', type=int)
    model_arg.add_argument('--basern-fp-dropout', type=float)
    # rn
    model_arg.add_argument('--rn-gt-hidden', type=int)
    model_arg.add_argument('--rn-gt-layer', type=int)
    model_arg.add_argument('--rn-fp-hidden', type=int)
    model_arg.add_argument('--rn-fp-layer', type=int)
    model_arg.add_argument('--rn-fp-dropout', type=float)
    # sarn  (FIX: this group was mislabelled "rn")
    model_arg.add_argument('--sarn-hp-hidden', type=int)
    model_arg.add_argument('--sarn-hp-layer', type=int)
    model_arg.add_argument('--sarn-gt-hidden', type=int)
    model_arg.add_argument('--sarn-gt-layer', type=int)
    model_arg.add_argument('--sarn-fp-hidden', type=int)
    model_arg.add_argument('--sarn-fp-layer', type=int)
    model_arg.add_argument('--sarn-fp-dropout', type=float)
    # mrn
    model_arg.add_argument('--mrn-hidden', type=int)
    model_arg.add_argument('--mrn-layer', type=int)
    # mlb
    model_arg.add_argument('--mlb-hidden', type=int)
    model_arg.add_argument('--mlb-glimpse', type=int)

    args, unparsed = parser.parse_known_args()
    args = load_default_config(args)
    # Device selection: fall back to CPU when CUDA is unavailable.
    if not torch.cuda.is_available():
        args.device = torch.device('cpu')
    else:
        torch.cuda.set_device(args.device)
        args.device = torch.device(args.device)
    args.data_config = [
        args.input_h, args.input_w, args.cpu_num, args.cv_pretrained,
        args.top_k, args.multi_label, args.tokenizer, args.text_max
    ]
    # config_list is joined into a unique run identifier further below.
    config_list = [args.project, args.model, args.dataset, args.epochs,
                   args.batch_size, args.lr, args.weight_decay,
                   args.gradient_clipping, args.device, args.multi_gpu,
                   args.gpu_num] + args.data_config + \
        ['cv', args.cv_filter, args.cv_kernel, args.cv_stride,
         args.cv_layer, args.cv_batchnorm,
         'te', args.te_pretrained, args.te_type, args.te_embedding,
         args.te_hidden, args.te_layer, args.te_dropout]
    train_loader = dataloader.load_dataloader(
        args.data_directory, args.dataset, True, args.batch_size,
        args.data_config)
    test_loader = dataloader.load_dataloader(
        args.data_directory, args.dataset, False, args.batch_size,
        args.data_config)
    args = load_dict(args)
    # Model dispatch: extend config_list with per-model hyperparameters.
    if args.model == 'film':
        config_list = config_list + \
            ['film', args.film_res_kernel, args.film_res_layer,
             args.film_cf_filter, args.film_fc_hidden, args.film_fc_layer,
             args.memo]
        model = Film(args)
    elif args.model == 'san':
        config_list = config_list + \
            ['san', args.san_layer, args.san_k, args.memo]
        model = San(args)
    elif args.model == 'basern':
        config_list = config_list + \
            ['basern', args.basern_gt_hidden, args.basern_gt_layer,
             args.basern_fp_hidden, args.basern_fp_layer,
             args.basern_fp_dropout, args.memo]
        model = BaseRN(args)
    elif args.model == 'rn':
        config_list = config_list + \
            ['rn', args.rn_gt_hidden, args.rn_gt_layer, args.rn_fp_hidden,
             args.rn_fp_layer, args.rn_fp_dropout, args.memo]
        model = RelationalNetwork(args)
    elif args.model == 'sarn':
        config_list = config_list + \
            ['sarn', args.sarn_hp_hidden, args.sarn_hp_layer,
             args.sarn_gt_hidden, args.sarn_gt_layer, args.sarn_fp_hidden,
             args.sarn_fp_layer, args.sarn_fp_dropout, args.memo]
        model = Sarn(args)
    elif args.model == 'mrn':
        config_list = config_list + \
            ['mrn', args.mrn_hidden, args.mrn_layer, args.memo]
        model = Mrn(args)
    elif args.model == 'mlb':
        config_list = config_list + \
            ['mlb', args.mlb_hidden, args.mlb_glimpse, args.memo]
        model = Mlb(args)
    else:
        # BUG FIX: the original only printed a warning here and then crashed
        # with NameError on the undefined `model` at return; fail fast.
        raise ValueError("Not an available model: " + str(args.model))
    args.config = '_'.join(map(str, config_list))
    if args.load_model:
        args.log = os.path.join(args.log_directory, args.project,
                                args.load_model)
        args.timestamp = args.load_model[:12]
    else:
        args.log = os.path.join(args.log_directory, args.project,
                                args.timestamp + args.config)
    print(f"Config: {args.config}")
    return args, model, train_loader, test_loader
def get_film_rating(message):
    """Bot handler: store the rating the user just sent for `film_name`."""
    global film_name, film_rating
    # The incoming message text is expected to be an integer rating.
    film_rating = int(message.text)
    library.set_film(Film(film_name, film_rating))
    bot.send_message(message.from_user.id,
                     'Рейтинг фильма успешно добавлен.')
def get_similar_films(db, main_film_id, weights=(1, 1, 1, 1, 1, 1)):
    """Find the 100 films nearest to *main_film_id* in feature space.

    Scans the database in batches of 100 films, computes weighted distances
    from the main film, keeps a running top-100, then also fills in the
    pairwise distances among the winners.

    Args:
        db: database handle passed through to Film and the matrix helpers.
        main_film_id: ID of the reference film.
        weights: per-feature weights for the distance metric.
            (FIX: the mutable list default was replaced by a tuple; the
            function only reads `weights`, so callers are unaffected.)

    Returns:
        (main_film, similar_films_instances) as Film instances, or None
        when the main film has no feature vectors.
    """
    num_films = 100
    main_film = Film(main_film_id)
    vectors = main_film.build_vectors(db)
    if vectors is None:
        return None
    main_film.set_vectors(vectors)
    main_film.set_metadata(db)
    similar_films = []
    batch_size = 100
    offset = 0
    while True:
        vector_matrix = build_vector_matrix(db, batch_size, offset)
        if vector_matrix is None:
            # No more rows: the scan is complete.
            break
        distance_matrix = calculate_distance_matrix(main_film.vectors,
                                                    vector_matrix, weights)
        # Drop the main film itself from its own neighbour list.
        distance_matrix = [film for film in distance_matrix
                           if int(film[0]) != int(main_film_id)]
        # Attach each film's raw vectors to its distance row.
        # NOTE(review): after the filter above the two matrices can be
        # misaligned by one row, and this positional id-equality guard then
        # silently drops every row past the main film — confirm intent.
        distance_matrix = [(distance_matrix[i] + (vector_matrix[i][1:],))
                           for i in range(0, len(distance_matrix))
                           if distance_matrix[i][0] == vector_matrix[i][0]]
        distance_matrix = sorted(distance_matrix, key=lambda x: x[1])
        similar_films += distance_matrix[:num_films]
        offset += batch_size
    # Global top-100 across all batches.
    similar_films = sorted(similar_films, key=lambda x: x[1])
    similar_films = similar_films[:num_films]
    similar_films_instances = []
    for film_id, distance, vectors in similar_films:
        similar_film = Film(film_id)
        similar_film.set_distance(main_film_id, distance)
        similar_film.set_metadata(db)
        similar_film.set_vectors(vectors)
        similar_films_instances.append(similar_film)
    # Pairwise distances among the neighbours themselves.
    vector_matrix = [(film.id,) + film.vectors
                     for film in similar_films_instances]
    for film in vector_matrix:
        distance_matrix = calculate_distance_matrix(film, vector_matrix,
                                                    weights)
        for i, (film_id, dist) in enumerate(distance_matrix):
            if similar_films_instances[i].id != film_id:
                print("error!")
            similar_films_instances[i].set_distance(film[0], dist)
    return main_film, similar_films_instances
def can_buy_film_ticket(self, film: Film):
    """Return True when this person can both afford the ticket and exceeds
    the film's minimum allowed age (strict comparisons, as before)."""
    if not self.__available_money > film.get_ticket_price():
        return False
    return self.__age > film.get_min_allowed_age()