Exemplo n.º 1
0
    def __init__(self, split, align=False, partition='all'):
        """Combined person-attribute dataset built from CelebA + DeepFashion.

        Args:
            split: dataset split name (e.g. 'train' / 'test'), embedded in
                the imdb name.
            align: use the aligned face images; only affects the name for
                the 'all' and 'face' partitions.
            partition: 'all' for the full dataset, or a sub-partition name
                (e.g. 'face') embedded in the imdb name.
        """
        # `if not x == y` / redundant `elif` replaced by a plain if/else:
        # the two branches were exhaustive by construction.
        if partition != 'all':
            name = 'person_' + partition + '_' + split
        else:
            name = 'person_' + split

        if align and partition in ('all', 'face'):
            name += '_align'

        Imdb.__init__(self, name)

        # Load two children dataset wrappers.
        self._face = CelebA(split, align=align)
        self._clothes = DeepFashion(split)
        # The class list is a combination of face and clothing attributes;
        # the index ranges record which slice belongs to which child.
        self._classes = self._face.classes + self._clothes.classes
        self._face_class_idx = range(self._face.num_classes)
        self._clothes_class_idx = range(
            self._face.num_classes,
            self._face.num_classes + self._clothes.num_classes)

        # Root directory of the combined imdb on disk.
        self._data_path = os.path.join(self.data_path, 'imdb_PersonAttributes')
        # Load the image lists and attributes.
        self._load_dataset(split, align, partition)
Exemplo n.º 2
0
	def main(self):
		"""Fetch subtitles for ``self.hash``, prepend IMDb info and save them.

		Runs ``self.check`` and ``self.download`` in worker threads (joined
		immediately, so effectively synchronous) and reads their results
		from ``self.queue``.  On success writes ``addsubs.srt`` and returns
		the saved ``Movie``; returns ``None`` on any failure.
		"""
		# `is not None` instead of `!= None` (PEP 8 identity test).
		if self.hash is not None:
			t = threading.Thread(target=self.check)
			t.start()
			t.join()
			answer = self.queue.get()
			# Sentinel strings signal lookup failures from the worker.
			if answer != "Result failed" and answer != "Server failed":
				if self.language in answer:
					t = threading.Thread(target=self.download)
					t.start()
					t.join()
					subtitles = self.queue.get()
					if subtitles != "Malformed request":
						imdb = Imdb(self.name)
						information = imdb.main()
						if information is not None:
							# Prepend the movie information to the subtitles.
							subtitles = "00:00:1,0 --> 00:00:20,0\nTitle: " + information.title + "\n Director:" + information.director + "\n Year:" + information.year + "\n \n" + subtitles
							movie = Movie(title=information.title, director=information.director, year=information.year, hash=self.hash)
							movie.save()
						else:
							movie = Movie(title=self.name, hash=self.hash)
							movie.save()
						try:
							# `with` guarantees the file is closed even if the
							# write fails (the old open/close leaked on error).
							with open("addsubs.srt", 'w') as f:
								f.write(subtitles.encode("utf-8"))
						except IOError:
							return None
						return movie
		return None
Exemplo n.º 3
0
def get_show_info(title):
    """Return the canonical name for *title*, preferring the local DB.

    Falls back to an IMDb search when the show is not cached locally.
    """
    cached = db.get_tv_show(title.lower())
    if cached:
        return cached.name
    lookup = Imdb()
    lookup.search(title)
    return lookup.title
Exemplo n.º 4
0
    def __init__(self, split):
        """CelebA+Webcam classification dataset wrapper for *split*."""
        Imdb.__init__(self, 'celeba_plus_webcam_cls_' + split)

        # Three-way hair/headwear classification labels.
        self._classes = ['Bald', 'Hat', 'Hair']

        # Root directory holding the images and annotation files.
        self._data_path = osp.join(self.data_path, 'imdb_CelebA+Webcam')
        self._load_dataset(split)
Exemplo n.º 5
0
    def __init__(self, split):
        """MNIST digit dataset wrapper for *split*."""
        Imdb.__init__(self, 'mnist_' + split)

        # Root directory of the MNIST imdb on disk.
        self._data_path = os.path.join(self.data_path, 'imdb_mnist')

        # One class per digit, 0 through 9.
        self._classes = list(range(10))

        # Load annotations.
        self._load_dataset(split)
Exemplo n.º 6
0
    def __init__(self, split, align=False):
        """CelebA face-attribute dataset wrapper.

        Args:
            split: dataset split name, embedded in the imdb name.
            align: if truthy, use the aligned face crops ('_align' imdb).
        """
        name = 'celeba_' + split
        # Plain truth test instead of `is True`: matches how `align` is
        # tested elsewhere in this project and accepts any truthy value.
        if align:
            name += '_align'

        Imdb.__init__(self, name)

        # The 40 binary CelebA attributes, in annotation-file order.
        self._classes = \
            ['5_o_Clock_Shadow', 'Arched_Eyebrows', 'Attractive', 'Bags_Under_Eyes', 'Bald', 'Bangs', 'Big_Lips', 'Big_Nose', 'Black_Hair', 'Blond_Hair', 'Blurry', 'Brown_Hair', 'Bushy_Eyebrows', 'Chubby', 'Double_Chin', 'Eyeglasses', 'Goatee', 'Gray_Hair', 'Heavy_Makeup', 'High_Cheekbones', 'Male', 'Mouth_Slightly_Open', 'Mustache', 'Narrow_Eyes', 'No_Beard', 'Oval_Face', 'Pale_Skin', 'Pointy_Nose', 'Receding_Hairline', 'Rosy_Cheeks', 'Sideburns', 'Smiling', 'Straight_Hair', 'Wavy_Hair', 'Wearing_Earrings', 'Wearing_Hat', 'Wearing_Lipstick', 'Wearing_Necklace', 'Wearing_Necktie', 'Young']

        # Load image paths and annotations.
        self._data_path = os.path.join(self.data_path, 'imdb_CelebA')
        self._load_dataset(split, align)
Exemplo n.º 7
0
    def __init__(self, split):
        """IBM attributes dataset wrapper for *split*."""
        Imdb.__init__(self, 'IBMattributes_' + split)
        self._split = split

        # Binary attribute labels covered by this dataset.
        self._classes = [
            'Bald', 'Hat', 'Hair', 'Blackhair', 'Blondehair', 'Facialhair',
            'Asian', 'Black', 'White', 'NoGlasses', 'SunGlasses',
            'VisionGlasses'
        ]

        # Load configuration, then image paths and annotations.
        self._data_path = osp.join(self.data_path, 'imdb_IBMAttributes')
        self._load_config()
        self._load_dataset(split)
Exemplo n.º 8
0
    def __init__(self, split):
        """DeepFashion clothing-attribute dataset wrapper for *split*."""
        Imdb.__init__(self, 'deepfashion_' + split)

        # Root directory of the DeepFashion imdb on disk.
        self._data_path = os.path.join(self.data_path, 'imdb_DeepFashion')

        # Parse the category list: after a two-line header, each line is
        # "<category name> <type id>" (name may contain spaces).
        self._classes = []
        self._class_types = []
        attr_file = os.path.join(self.data_path, 'Anno',
                                 'list_category_cloth.txt')
        with open(attr_file, 'r') as fid:
            entries = fid.readlines()[2:]  # drop the two header lines
        for entry in entries:
            tokens = entry.split()
            self._classes.append(' '.join(tokens[:-1]))
            self._class_types.append(int(tokens[-1]))

        # Load annotations.
        self._load_dataset(split)
Exemplo n.º 9
0
# evaluation model with pascal/voc measures
from __future__ import absolute_import, division, print_function
import os
from imdb import Imdb
from network import Network

# Data layout: annotations and images side by side under ./data.
data_dir = os.path.join(os.getcwd(), 'data')
anno_dir = os.path.join(data_dir, 'eval_annotation')
images_dir = os.path.join(data_dir, 'images')

# batch_size=1 so each iteration yields a single image with its ground truth.
imdb = Imdb(anno_dir, images_dir, batch_size=1)

# Inference-only network (no training graph built).
net = Network(is_training=False)

# NOTE(review): predictions are produced but not consumed here — presumably
# the metric-accumulation code follows this chunk; confirm in the full file.
for images, gt_boxes, gt_cls in imdb.next_batch():  # batch_size is 1
    box_pred, cls_inds, scores = net.predict(images)
Exemplo n.º 10
0
def run(path,proxy,username,password):
	"""Find, download and attach subtitles/metadata for videos under *path*.

	Python 2 entry point.  Pipeline: hash-lookup on OpenSubtitles ->
	subtitle download via SubDB -> name/hash subtitle search on
	OpenSubtitles -> IMDb id lookup for leftovers -> fetch movie info ->
	optional download-by-id -> rename/save each file, logging everything
	to log_flashsubs.txt next to this script.

	Args:
		path: directory to scan for video files.
		proxy: optional HTTP proxy URL (falsy to disable).
		username, password: optional opensubtitles.org credentials;
			anonymous login is used when absent.
	"""
	# Rotate the log: append while under 10 MB, otherwise start over.
	log_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),"log_flashsubs.txt")
	if os.path.isfile(log_file) and os.path.getsize(log_file) < 10*1024*1024:
		log = open(log_file,"a")
	else:
		log = open(log_file,"w")
	
	try:
		subdb = SubDBAPI()
		if proxy:
			imdb = Imdb({'http':proxy})
		else:
			imdb = Imdb()

		opensub = opensubapi.OpenSubAPI(proxy)
		
		if username and password:
			try:
				token = opensub.login(username,password)
				assert type(token)==str
			except:
				print "Invalid username or password.\n"
				sys.exit()
			else:
				print "Login Successful\n"
		else:
			try:
				token = opensub.login()
				assert type(token)==str
			except:
				print "Check Internet Connection"
				sys.exit()

		#get file list
		movie_list = get_list(path)

		#Total Number of Movies
		num_movie = len(movie_list)
		
		#subtitle file for movies
		sub_list = [None]*num_movie
		
		#index number for movies whose imdbid are not yet found
		no_id_index = []
		
		#imdb id of movies in movie list
		imdb_id_list = [None]*num_movie
		
		#subtitles id for movies in opensubtitles.org
		open_subs_id = []
		
		#index for which subs are found in opensubtitles.org
		index_opensub = []
		
		print "Total Files - {}".format(num_movie)

		if num_movie ==0:
			opensub.logout()
			print "Nothing to do here!"
			sys.exit()


		#get imdb id for movies whose hash is present in opensubapi
		print "Searching"
		result = opensub.check_movie_list(movie_list)
		if result:
			for i in xrange(num_movie):
				if result[i]:
					imdb_id_list[i] = result[i]['MovieImdbID']

		print "Downloading Subs"
		subdb_download(result,num_movie,movie_list,sub_list,subdb)


		#get movies which are present in opensub database by name or hash
		print "\nSearching Subs"
		result = opensub.search_sub_list(movie_list)


		# result may be shorter than movie_list; treat missing tails as "not found".
		len_res = len(result)
		for i in xrange(num_movie):
			if i >= len_res or result[i] == None:
				if not imdb_id_list[i]:
					no_id_index.append(i)
			else:
				if not imdb_id_list[i]:
					imdb_id_list[i] = result[i]['IDMovieImdb']

				if not sub_list[i]:
					open_subs_id.append(result[i]['IDSubtitleFile'])
					index_opensub.append(i)
				
		#Download Subs which are found in opensubtitles database
		print "Downloading Subs"
		opensub_download(open_subs_id,index_opensub,sub_list,opensub)
		

		if len(no_id_index) != 0:
			print "\nSearching Info"
			no_id_file = []
			for index in no_id_index:
				no_id_file.append(movie_list[index])
			id_list = imdb.get_imdb_id(no_id_file)
			for ids,index in zip(id_list,no_id_index):
				imdb_id_list[index] = ids

		#get info from imdb about movies in movie list
		# IMDb ids are zero-padded to 7 digits and prefixed with "tt".
		print "\nGetting Information"
		info_list = imdb.get_info(["tt"+"0"*(7-len(ids))+ids if ids else None for ids in imdb_id_list])

		#Now, sub_list = subtitles for corresponding movies in movie_list
		# info_list = info for corresponding movies in movie_list

		if len(info_list) != len(sub_list) != num_movie:
			print "ERROR"
			sys.exit()

		no_sub_imdb_id = []
		no_sub_imdb_id_index = []

		for i in xrange(num_movie):
			if imdb_id_list[i] != None and sub_list[i] == None:
				no_sub_imdb_id.append(imdb_id_list[i])
				no_sub_imdb_id_index.append(i)

		if opensub.get_down_lim() <= 0:
			print "Download Limit Reached!\nTry again after 24 hours\n"
		
		elif len(no_sub_imdb_id) != 0:
			conf = raw_input("\nWARNING: Experimental\nOnly proceed if the names of your file are not misleading \
like videoplayback, movie, s9e8 etc.\nDo you wish to use this feature?(y/n): ")
			if conf.lower() in ['yes','y']:
				opensub_download_id(no_sub_imdb_id,no_sub_imdb_id_index,sub_list,opensub)

		#Final - sub_list - subtitles of movies in movie_list
		#Final - info_list - info of movies in movie_list

		print "Saving"

		#dictionary with key as id of series and corresponding value as name of series.
		series_dict={}

		try:
			for num in xrange(num_movie):
				path = movie_list[num]
				base_path,name = os.path.split(path)
				base_name,ext = os.path.splitext(name)
				info = info_list[num]
				sub = sub_list[num]

				log.write("\nCurrently Processing - {}\n".format(path))

				# Build the new file name from the IMDb title; fall back to
				# the original name when the info lookup failed.
				try:
					title = normalize('NFKD',info['Title']).encode('ascii','ignore')
					new_name = title
				except:
					new_name = base_name
				else:
					if info['Type']=='series':
						continue
					elif info['Type'] == 'episode':
						Season = info['Season']
						Episode = info['Episode']
						if Season.isdigit() and Episode.isdigit():
							try:
								series_id = str(info['seriesID'])
								try:
									Series_name = series_dict[series_id]
								except:
									Series_name = normalize('NFKD',imdb.get_info([series_id])[0]['Title']).encode('ascii','ignore')
									if Series_name:
										series_dict[series_id] = Series_name
							except:
								Series_name = ""
							len_season = 2
							len_episode = 2
							if len(Episode) > len_episode:
								new_name = "{0} S{1:0>2}E{2} {3}".format(Series_name,str(Season),str(Episode),title)
							else:
								new_name = "{0} S{1:0>2}E{2:0>2} {3}".format(Series_name,str(Season),str(Episode),title)
							
				#Removing forbidden characters from files (Windows forbidden)
				new_name = str(new_name).translate(None,FORBIDDEN_CHARS).lstrip()

				#saving
				save(base_path,base_name,new_name,ext,sub,info,log)
		except:
			# Make sure the session is closed before propagating the error.
			opensub.logout()
			raise

		log.write("-"*120)
		log.close()
		opensub.logout()
		print("\nDone!")
		print("Changes done are stored in {}\n".format(log_file))
		print(DEV_NOTE)

		if RUNNING_AS_WINDOW:
			raw_input("Press any key to exit...")
	
	except BaseException as e:
		# Log every failure except deliberate exits/interrupts, then return
		# (never re-raise) so the console window can be held open below.
		if not type(e).__name__ in ["SystemExit","KeyboardInterrupt"]:
			print "\nOops...An Error Occured!\n"
			log.write("\nERROR:\n")
			log.write(traceback.format_exc())
			print "Error information stored in {}".format(log_file)
		print(DEV_NOTE)
		log.write("\n"+"-"*120)
		log.close()
		if RUNNING_AS_WINDOW:
			raw_input("Press any key to exit...")
		return
Exemplo n.º 11
0
#########################################################################################################################

if __name__ == "__main__":

	log_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),"log_flashsubs.txt")
	if os.path.isfile(log_file) and os.path.getsize(log_file) < 10*1024*1024:
		log = open(log_file,"a")
	else:
		log = open(log_file,"w")
	
	try:
		username,password,proxy,path = prog_init()

		subdb = SubDBAPI()
		if proxy:
			imdb = Imdb({'http':proxy})
		else:
			imdb = Imdb()

		opensub = opensubapi.OpenSubAPI(proxy)
		
		if username and password:
			try:
				token = opensub.login(username,password)
				assert type(token)==str
			except:
				print "Invalid username or password.\n"
				sys.exit()
			else:
				print "Login Successful\n"
		else:
Exemplo n.º 12
0
import time
from datetime import timedelta
from imdb import Imdb
from network import Network

data_dir = os.path.join(os.getcwd(), 'data')
anno_dir = os.path.join(data_dir, 'annotation')
images_dir = os.path.join(data_dir, 'images')

parser = argparse.ArgumentParser()
parser.add_argument('--num_epochs', type=int, default=1)
parser.add_argument('--batch_size', type=int, default=1)
parser.add_argument('--learn_rate', type=float, default=1e-3)
args = parser.parse_args()

imdb = Imdb(anno_dir, images_dir,
            batch_size=args.batch_size)

net = Network(is_training=True, lr=args.learn_rate)

train_t = 0
step = 0

print('start training')

for epoch in range(1, args.num_epochs + 1):
    epoch_t = time.time()

    for images, gt_boxes, gt_cls in imdb.next_batch():
        step, bbox_loss, iou_loss, cls_loss = net.fit(images, gt_boxes, gt_cls)

        if step % 100 == 0: