示例#1
0
def main():
    """Prompt for a search query and print the number of matching movies.

    Relies on the ``IMDB`` client class being importable in this module.
    """
    api = IMDB()

    query = raw_input("Search: ")  # Python 2 built-in; would be input() on Python 3
    # Parenthesized print behaves identically on Python 2 and 3 for a single
    # argument, so this line is version-agnostic.
    print(len(api.search_movie(query)))
示例#2
0
def main(data_path, output_path):
    """Load the IMDB data, split it, and write the validation TFRecord.

    Prints the total wall-clock time for the run when finished.

    :param data_path: source directory handed to the converter
    :param output_path: destination directory for the TFRecord file
    """
    t0 = time.time()

    imdb = IMDB()
    train_sets, val_sets = split_imdb_data(imdb.get_data(), 0.3)

    # NOTE: the training-set conversion is deliberately left disabled here.
    # convert_to_tfrecord(train_sets, data_path, output_path, 'train')
    convert_to_tfrecord(val_sets, data_path, output_path, 'val')

    print("Running %.3f sec All done!" % (time.time() - t0))
示例#3
0
    def selective_search_roidb(self, gt_roidb, append_gt=False):
        """Return the selective-search roidb, optionally merged with ground truth.

        The result is memoised on disk under the dataset cache path, so
        repeated calls are served from the pickle.

        :param gt_roidb: ground truth roidb
        :param append_gt: when True, merge ground-truth boxes into the result
        :return: roidb of selective search
        """
        cache_file = os.path.join(self.cache_path, self.name + '_ss_roidb.pkl')

        # Fast path: a previous run already built and pickled the roidb.
        if os.path.exists(cache_file):
            with open(cache_file, 'rb') as fid:
                cached = cPickle.load(fid)
            print('{} ss roidb loaded from {}'.format(self.name, cache_file))
            return cached

        if not append_gt:
            roidb = self.load_selective_search_roidb(gt_roidb)
        else:
            print('appending ground truth annotations')
            proposals = self.load_selective_search_roidb(gt_roidb)
            roidb = IMDB.merge_roidbs(gt_roidb, proposals)

        # Persist for the next run before returning.
        with open(cache_file, 'wb') as fid:
            cPickle.dump(roidb, fid, cPickle.HIGHEST_PROTOCOL)
        print('wrote ss roidb to {}'.format(cache_file))

        return roidb
示例#4
0
    def selective_search_roidb(self, gt_roidb, append_gt=False):
        """Build (or reload from cache) the selective-search proposal roidb.

        :param gt_roidb: ground truth roidb
        :param append_gt: if True, ground-truth boxes are merged in
        :return: roidb of selective search
        """
        cache_file = os.path.join(self.cache_path, self.name + '_ss_roidb.pkl')
        if os.path.exists(cache_file):
            # Reuse the pickled roidb written by an earlier call.
            with open(cache_file, 'rb') as fid:
                roidb = cPickle.load(fid)
            print('{} ss roidb loaded from {}'.format(self.name, cache_file))
            return roidb

        if append_gt:
            print('appending ground truth annotations')
            proposal_db = self.load_selective_search_roidb(gt_roidb)
            roidb = IMDB.merge_roidbs(gt_roidb, proposal_db)
        else:
            roidb = self.load_selective_search_roidb(gt_roidb)

        # Cache the freshly-built roidb so the work is done only once.
        with open(cache_file, 'wb') as fid:
            cPickle.dump(roidb, fid, cPickle.HIGHEST_PROTOCOL)
        print('wrote ss roidb to {}'.format(cache_file))

        return roidb
示例#5
0
def main(ARGS, device):
    """Prepare the IMDB train/val datasets, build the VAE, and train it.

    For each of ``ARGS.epochs`` epochs this runs one train/validation pass,
    prints the ELBOs, samples a few sentences from the model, and saves a
    checkpoint under ``trained_models/``.

    :param ARGS: parsed command-line namespace with all hyperparameters
    :param device: torch device the model and embeddings are moved to
    """
    data_splits = ['train', 'val']
    datasets = {
        split: IMDB(ARGS.data_dir, split, ARGS.max_sequence_length,
                    ARGS.min_word_occ, ARGS.create_data)
        for split in data_splits
    }
    pretrained_embeddings = datasets['train'].get_pretrained_embeddings(
        ARGS.embed_dim).to(device)
    model = VAE(
        datasets['train'].vocab_size,
        ARGS.batch_size,
        device,
        pretrained_embeddings=pretrained_embeddings,
        trainset=datasets['train'],
        max_sequence_length=ARGS.max_sequence_length,
        lstm_dim=ARGS.lstm_dim,
        z_dim=ARGS.z_dim,
        embed_dim=ARGS.embed_dim,
        n_lstm_layers=ARGS.n_lstm_layers,
        kl_anneal_type=ARGS.kl_anneal_type,
        kl_anneal_x0=ARGS.kl_anneal_x0,
        kl_anneal_k=ARGS.kl_anneal_k,
        kl_fbits_lambda=ARGS.kl_fbits_lambda,
        word_keep_rate=ARGS.word_keep_rate,
    )
    model.to(device)

    optimizer = torch.optim.Adam(model.parameters())

    print('Starting training process...')

    # Ensure the checkpoint directory exists before counting its files;
    # os.listdir raises FileNotFoundError on a fresh checkout otherwise.
    os.makedirs("trained_models", exist_ok=True)
    amount_of_files = len(os.listdir("trained_models"))
    for epoch in range(ARGS.epochs):
        elbos = run_epoch(model, datasets, device, optimizer)
        train_elbo, val_elbo = elbos
        print(
            f"[Epoch {epoch} train elbo: {train_elbo}, val_elbo: {val_elbo}]")

        # Sample from the current model without tracking gradients.
        with torch.no_grad():
            model.eval()  # NOTE(review): assumes run_epoch restores train mode
            samples = model.inference()
            print(*idx2word(samples,
                            i2w=datasets['train'].i2w,
                            pad_idx=datasets['train'].pad_idx),
                  sep='\n')

        # NOTE(review): the target filename is computed once before the loop,
        # so every epoch overwrites the same checkpoint (keep-latest) —
        # confirm per-epoch checkpoints are not wanted.
        model.save(f"trained_models/{amount_of_files + 1}.model")
示例#6
0
 def rpn_roidb(self, gt_roidb):
     """Return the RPN proposal roidb, merged with ground truth when training.

     :param gt_roidb: ground truth roidb
     :return: roidb of rpn (ground truth included)
     """
     # At test time ground truth is not merged in.
     if self.image_set == 'test':
         print('rpn database need not be used in test')
         return self.load_rpn_roidb(gt_roidb)

     proposals = self.load_rpn_roidb(gt_roidb)
     return IMDB.merge_roidbs(gt_roidb, proposals)
示例#7
0
File: pascal_voc.py  Project: 4ker/mxnet
 def rpn_roidb(self, gt_roidb):
     """Build the RPN roidb for this image set.

     Ground-truth boxes are merged in for every split except 'test'.

     :param gt_roidb: ground truth roidb
     :return: roidb of rpn (ground truth included)
     """
     if self.image_set != 'test':
         proposal_db = self.load_rpn_roidb(gt_roidb)
         merged = IMDB.merge_roidbs(gt_roidb, proposal_db)
         return merged

     print('rpn database need not be used in test')
     return self.load_rpn_roidb(gt_roidb)
 def rpn_roidb(self, gt_roidb, append_gt=False):
     """Return the RPN roidb, optionally with ground-truth boxes appended.

     :param gt_roidb: ground truth roidb
     :param append_gt: append ground truth
     :return: roidb of rpn
     """
     # Plain proposals unless the caller asked for ground truth as well.
     if not append_gt:
         return self.load_rpn_roidb(gt_roidb)

     print('appending ground truth annotations')
     proposals = self.load_rpn_roidb(gt_roidb)
     return IMDB.merge_roidbs(gt_roidb, proposals)
示例#9
0
 def rpn_roidb(self, gt_roidb, append_gt=False):
     """Load region proposals from the RPN, optionally merged with ground truth.

     :param gt_roidb: ground truth roidb
     :param append_gt: append ground truth
     :return: roidb of rpn
     """
     if append_gt:
         print('appending ground truth annotations')
         proposal_db = self.load_rpn_roidb(gt_roidb)
         result = IMDB.merge_roidbs(gt_roidb, proposal_db)
     else:
         result = self.load_rpn_roidb(gt_roidb)
     return result
示例#10
0
    def selective_search_roidb(self, gt_roidb):
        """Return the selective-search roidb, merged with ground truth
        for non-test splits, caching the result on disk.

        :param gt_roidb: ground truth roidb
        :return: roidb of selective search (ground truth included)
        """
        cache_file = os.path.join(self.cache_path, self.name + '_ss_roidb.pkl')

        # Serve from the on-disk cache when a previous run already built it.
        if os.path.exists(cache_file):
            with open(cache_file, 'rb') as fid:
                roidb = cPickle.load(fid)
            print('{} ss roidb loaded from {}'.format(self.name, cache_file))
            return roidb

        if self.image_set != 'test':
            proposals = self.load_selective_search_roidb(gt_roidb)
            roidb = IMDB.merge_roidbs(gt_roidb, proposals)
        else:
            # No ground truth is used at test time.
            roidb = self.load_selective_search_roidb(None)

        # Write the freshly-built roidb back to the cache.
        with open(cache_file, 'wb') as fid:
            cPickle.dump(roidb, fid, cPickle.HIGHEST_PROTOCOL)
        print('wrote ss roidb to {}'.format(cache_file))

        return roidb
示例#11
0
    def selective_search_roidb(self, gt_roidb):
        """Build the selective-search roidb (cached as a pickle).

        Ground-truth boxes are merged in for all splits except 'test'.

        :param gt_roidb: ground truth roidb
        :return: roidb of selective search (ground truth included)
        """
        cache_file = os.path.join(self.cache_path, self.name + '_ss_roidb.pkl')
        if os.path.exists(cache_file):
            # Cache hit: reload and return immediately.
            with open(cache_file, 'rb') as fid:
                cached = cPickle.load(fid)
            print('{} ss roidb loaded from {}'.format(self.name, cache_file))
            return cached

        if self.image_set != 'test':
            ss_boxes = self.load_selective_search_roidb(gt_roidb)
            roidb = IMDB.merge_roidbs(gt_roidb, ss_boxes)
        else:
            roidb = self.load_selective_search_roidb(None)

        with open(cache_file, 'wb') as fid:
            cPickle.dump(roidb, fid, cPickle.HIGHEST_PROTOCOL)
        print('wrote ss roidb to {}'.format(cache_file))

        return roidb
示例#12
0
File: imdb_rnn.py  Project: ami-GS/ngraph
                    help='type of recurrent layer to use (rnn or birnn)')
parser.set_defaults()
args = parser.parse_args()

# These hyperparameter values follow the paper this example reproduces.
args.batch_size = 128
time_steps = 128          # fixed sentence length (pad/clip to this many tokens)
hidden_size = 10
gradient_clip_value = 15
embed_size = 128
vocab_size = 20000
pad_idx = 0               # token index used for padding

# Download/load the IMDB sentiment dataset, padded to `time_steps` tokens.
imdb_dataset = IMDB(path=args.data_dir,
                    sentence_length=time_steps,
                    pad_idx=pad_idx)
imdb_data = imdb_dataset.load_data()

# Wrap the raw arrays in iterators that yield fixed-size minibatches.
train_set = ArrayIterator(imdb_data['train'],
                          batch_size=args.batch_size,
                          total_iterations=args.num_iterations)
valid_set = ArrayIterator(imdb_data['valid'], batch_size=args.batch_size)

inputs = train_set.make_placeholders()
# Bind the output axis length to the number of classes in the dataset.
ax.Y.length = imdb_dataset.nclass

# Uniform weight initialization in [-0.08, 0.08].
init = UniformInit(low=-0.08, high=0.08)

if args.layer_type == "rnn":
示例#13
0
File: command.py  Project: droidlife/ADT
    def search_query(self):
        """Interactive command loop for the torrent tool (Python 2).

        Reads one command from stdin and dispatches on its leading keyword:
        QUIT, SEARCH <name>, ADD <title>, REMOVE <title>, INFO <title>,
        PRINT, TOP <type>; any other input is treated as a raw search query.
        Every branch except QUIT recurses back into search_query() to prompt
        again.

        NOTE(review): the loop is implemented by recursion, so a very long
        session could eventually hit Python's recursion limit.
        """
        query = raw_input('\nEnter the torrent to be searched : \t').strip().lower()

        # QUIT: exit the program.
        if re.match(r'quit\b', query, flags=re.IGNORECASE):
            print '\nGoodbye...'
            sys.exit(0)

        # SEARCH <name>: one-off torrent search.
        elif re.match(r'search\b', query, flags=re.IGNORECASE):
            title_head = query[7:].strip()  # drop the 'search ' prefix
            try:
                Download_Torrent().search_begins_kat(name=title_head, boolean=True)
                self.search_query()
            # NOTE(review): bare except silently hides the real failure cause.
            except:
                print '\nSorry torrent can not be downloaded.\nPlease Try again'
                self.search_query()

        # ADD <title>: start following a show; each followed title is stored
        # as one "title/season/air-date" line in the follow file.
        elif re.match(r'add\b', query, flags=re.IGNORECASE):
            title_head = query[4:].strip()  # drop the 'add ' prefix

            title_exist = False
            if os.path.isfile(os.path.expanduser(self.file_path)):
                file = open(os.path.expanduser(self.file_path), 'r')
                lines = file.readlines()
                file.close()
                # Substring scan to see whether the title is already followed.
                for line in lines:
                    if line.__contains__(title_head):
                        title_exist = True
                if title_exist:
                    print '\n' + 'You already follow ' + title_head.upper()
                else:
                    file = open(os.path.expanduser(self.file_path), 'a')
                    season, age = IMDB().get_next_episode(title=title_head)
                    data = title_head + '/' + str(season) + '/' + str(age)
                    file.write(data + '\n')
                    file.close()
                    print '\n' + title_head.upper() + ' has been added.'
                    if age is not None:
                        print '\nThe new episode will air on ' + age
                        print '\n' + 'New torrent will be downloaded automatically.'
                    else:
                        print '\nThe new episode air date is not out yet.'
                        print '\nNew torrent will be downloaded when the air date arrives.'

            # First ADD ever: the follow file does not exist yet.
            # NOTE(review): this branch duplicates the append logic above.
            else:
                file = open(os.path.expanduser(self.file_path), 'a')
                season, age = IMDB().get_next_episode(title=title_head)
                data = title_head + '/' + str(season) + '/' + str(age)
                file.write(data + '\n')
                file.close()
                print '\n' + title_head.upper() + ' has been added.'
                if age is not None:
                    print '\nThe new episode will air on ' + age
                    print '\n' + 'New torrent will be downloaded automatically.'
                else:
                    print '\nThe new episode air date is not out yet.'
                    print '\nNew torrent will be downloaded when the air date arrives.'

            self.search_query()

        # REMOVE <title>: rewrite the follow file without matching lines.
        elif re.match(r'remove\b', query, flags=re.IGNORECASE):
            title_head = query[7:].strip()  # drop the 'remove ' prefix

            if os.path.isfile(os.path.expanduser(self.file_path)):
                file = open(os.path.expanduser(self.file_path), 'r')
                lines = file.readlines()
                file.close()
                # Rewrite the file, keeping only non-matching lines.
                file = open(os.path.expanduser(self.file_path), 'w')
                for line in lines:
                    if not line.__contains__(title_head):
                        file.write(line)
                file.write("\n")
                file.close()
                print 'Removed ' + title_head.upper()

            else:
                print '\nYou are not following ' + title_head.upper()
                print '\nUse the commnad ADD <title> to start following'

            self.search_query()

        # INFO <title>: show latest and next episode details from IMDB.
        elif re.match(r'info\b', query, flags=re.IGNORECASE):
            print '\nGetting information...'
            i = IMDB()
            title_head = query[5:].strip()  # drop the 'info ' prefix
            title, season, age = i.get_latest_episode(title=title_head)

            print '\nCurrent Episode Details'
            if age is None:
                print '\n Information not available yet'
            else:
                print '\n Title : ' + str(title).upper()
                print '\n Episode : ' + str(season).upper()
                print '\n Air Date : ' + age

            season, age = i.get_next_episode(title=title_head)
            print '\nNext Episode details : '
            if age is None:
                print '\n Information not available yet'
            else:
                print '\n Episode : ' + str(season).upper()
                print '\n Air Date : ' + age

            self.search_query()

        # PRINT: list every followed title (first '/'-separated field per line).
        elif re.match(r'print\b', query, flags=re.IGNORECASE):
            if os.path.isfile(os.path.expanduser(self.file_path)):
                file = open(os.path.expanduser(self.file_path), 'r')
                lines = file.readlines()
                file.close()
                for line in lines:
                    title = str(line).split('/')[0].strip()
                    print title
            else:
                print '\nYou are not following anything'
                print '\nUse the commnad ADD <title> to start following'

            self.search_query()

        # TOP <type>: show an IMDB top list for the given category.
        elif re.match(r'top\b', query, flags=re.IGNORECASE):
            print '\nFetching data...'
            title_head = query[4:].strip()  # drop the 'top ' prefix
            result = IMDB_TOP().top_items(type=title_head)
            if not result:
                print '\nSorry the following keyword is not present.\nPlease try again'
            self.search_query()
        # Fallback: treat the whole input as a search term.
        else:
            try:
                Download_Torrent().search_begins_kat(name=query, boolean=True)
                self.search_query()
            # NOTE(review): bare except silently hides the real failure cause.
            except:
                print '\nSorry torrent can not be downloaded.\nPlease Try again'
                self.search_query()
示例#14
0
    blurayURL = [
        "https://www.blu-ray.com/movies/A-Star-Is-Born-Blu-ray/217109/"
    ]
# NOTE(review): the indented list above appears to belong to a branch whose
# header is outside this excerpt — left untouched.
# Read user settings (bdinfo binary path and output directory) from conf.txt.
config = ConfigParser()
config.read("conf.txt")
bdinfoPath = config["user_settings"]["bdinfo"]
tempDir = config["user_settings"]["output_dir"]

# For each Blu-ray URL: scrape its page, create an output folder, then pull
# matching metadata from IMDB and TMDB, printing each object's attributes.
for url in blurayURL:
    blurayObj = Bluray(url)
    blurayObj.build()
    blurayObj.printAttrs()

    directory = Folder()
    directory.build(tempDir, blurayObj.title)

    imdbObj = IMDB()
    imdbObj.build(blurayObj.imdbLink, blurayObj.title, blurayObj.year,
                  blurayObj.runtime)
    imdbObj.printAttrs()

    tmdbObj = MovieDB()
    tmdbObj.build(blurayObj.imdbLink, directory.screenDir)
    tmdbObj.printAttrs()

    # Optional: run BDInfo when a path was supplied on the command line.
    if args.bdinfo:
        bdiObj = BDInfo()
        bdiObj.build(args.bdinfo, directory.movieDir)
        print(bdiObj.prettyBDInfo)

    # templateObj = Template(url, blurayObj.title, blurayObj.year, etc.)
示例#15
0
"""
Created on Wed Sep 29 10:06:27 2021
ContentAI
@author: Herais
"""
#%% Import Libraries
import os
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.remote.command import Command
#%% Instantiate the project's IMDB scraper wrapper (Spyder/Jupyter cell).
from imdb import IMDB
imdb = IMDB()
#%% Fetch the "most popular movies" chart; presumably returns a DataFrame
# (TODO confirm the return type against the imdb module).
df_mpm = imdb.get_most_popular_movies()

#%% Locate the chart table in the page the wrapper's driver currently holds.
element_table = imdb.driver.find_element(By.XPATH, "//table")
#%% Collect the poster cells of that table.
elements_td = element_table.find_elements(By.CLASS_NAME, "posterColumn")
#%%

# Raw HTML of the currently loaded page, for offline inspection.
html = imdb.driver.page_source
#%%

#%% Get movie pages from table
path_wip = 'wip'  # working directory name for intermediate output
element_table = imdb.driver.find_element(By.XPATH, "//table")