def setup_loggers_for_user(self, user):
    """Point all per-user logging at <root>/content/<user>/history.log.

    Opens the user's history log in append mode and shares the handle
    with the DB, ImageUtils and Reddit helpers.
    """
    directory = path.join(ImageUtils.get_root(), 'content', user)
    # Make sure the user's content directory exists before opening the log
    ImageUtils.create_subdirectories(directory)
    # One shared handle for every component that logs on this user's behalf
    log_handle = open(path.join(directory, 'history.log'), 'a')
    self.logger = log_handle
    self.db.logger = log_handle
    ImageUtils.logger = log_handle
    self.reddit.logger = log_handle
def __init__(self):
    """Prepare lane-line state and sliding-window search parameters."""
    self.__leftLine = Line(LineType.left)
    self.__rightLine = Line(LineType.right)
    # Number of sliding windows stacked vertically over the frame
    self.__nwindows = 8
    # Half-width (+/- margin, in pixels) of each search window
    self.__margin = 45
    # Minimum pixel count found before a window is re-centered
    self.__minpix = 50
    self.__imageUtils = ImageUtils()
def add_existing_image(self, user, oldimage, oldpath, subdir='', album_id=-1):
    # Import a pre-existing image file into the user's content directory:
    # validate it, copy it into place, and generate a thumbnail.
    # `subdir`/`album_id` are set when the image belongs to an album.
    if 'tumblr' in oldpath: # Can't properly handle tumblr links
        self.debug('cannot properly handle tumblr links; trying anyway')
        #return
    if subdir == '' and album_id == -1:
        self.debug('adding image: %s' % oldpath)
    # Ensure image is an actual image
    try:
        # dims is unused; the call is only a validity probe on the file
        dims = ImageUtils.get_dimensions(oldpath)
    except:
        self.debug('failed to load image: %s, skipping' % oldpath)
        return
    newimage = path.join(ImageUtils.get_root(), 'content', user, subdir, oldimage)
    # Collapse double extensions produced by earlier naming schemes
    newimage = newimage.replace('.jpeg.jpg', '.jpg')
    thumbnail = path.join(ImageUtils.get_root(), 'content', user, subdir, 'thumbs', oldimage)
    thumbnail = thumbnail.replace('.jpeg.jpg', '.jpg')
    if path.exists(newimage):
        self.debug('new image already exists: %s' % newimage)
        return
    ImageUtils.create_subdirectories(path.join(ImageUtils.get_root(), 'content', user, subdir, 'thumbs'))
    copy2(oldpath, newimage)
    try:
        ImageUtils.create_thumbnail(newimage, thumbnail)
    except Exception, e:
        # Fall back to the generic "no thumbnail" placeholder
        self.debug('failed to create thumbnail: %s' % str(e))
        thumbnail = path.join(ImageUtils.get_root(), 'images', 'nothumb.png')
def setup_loggers_for_user(self, user):
    """Configure logging for a run against `user`.

    Honors the 'log_level' config value:
      * 'none'   -- discard all output (os.devnull)
      * 'global' -- reuse the process-wide root log
      * 'user'   -- append to <root>/content/<user>/history.log (default)
    The chosen handle is shared with the DB, ImageUtils and Reddit helpers.
    """
    # Create directories if needed
    user_dir = path.join(ImageUtils.get_root(), 'content', user)
    ImageUtils.create_subdirectories(user_dir)
    # Setup logger
    log_level = self.db.get_config('log_level', default='user')
    if log_level == 'none':
        self.logger = open(devnull, 'w')
    elif log_level == 'global':
        self.logger = self.root_log
    else:
        # 'user' and any unrecognized value fall back to the per-user log.
        # Previously an unexpected config value left self.logger stale or
        # unset, so the assignments below could fail or reuse an old file.
        self.logger = open(path.join(user_dir, 'history.log'), 'a')
    self.db.logger = self.logger
    ImageUtils.logger = self.logger
    self.reddit.logger = self.logger
def get_poster(self, film_name, imdb_id):
    """Fetch the HD poster for one film and save it as <imdb_id>.jpg."""
    utils = ImageUtils()
    target_template = '../res/hd_posters/{}.jpg'
    try:
        # search -> poster page -> raw image bytes, then write to disk
        page_url = self.search(film_name)
        poster_page = self.get_image_url(page_url)
        poster_bytes = self.get_image_src(poster_page)
        utils.write_image_to_file(poster_bytes, target_template.format(imdb_id))
    except Exception as e:
        # Best-effort: report the failure and keep going
        print('ERROR: Failed to find poster for: ', film_name)
        print(e)
        traceback.print_exc()
def start(self):
    # Main ripper loop: reset interrupted URLs, then loop forever draining
    # finished results, pruning completed rips, and launching download
    # threads (bounded by MAX_THREADS).
    stale_count = self.db.count('urls', 'pending != 0')
    if stale_count > 0:
        print 'MAIN: found %d stale (interrupted) URLs, marking as non-pending...' % stale_count
        self.db.update('urls', 'pending = 0')
        self.db.commit()
    print 'MAIN: starting infinite loop...'
    already_printed_sleep_msg = False
    while True:
        sleep(0.1)
        while len(self.results) > 0:
            # self.results is the list of downloaded medias to be added to the DB
            result = self.results.pop()
            self.handle_result(result)
        # Remove recently-completed rips
        while len(self.to_remove) > 0:
            (albumid, iindex) = self.to_remove.pop()
            self.db.delete('urls', 'album_id = ? and i_index = ?', [ albumid, iindex ] )
            self.db.commit()
        try:
            # Get next URL to retrieve
            url = self.get_next_url()
        except Exception, e:
            if str(e) == 'no URLs found':
                # Print the idle message only once per idle period
                if not already_printed_sleep_msg:
                    already_printed_sleep_msg = True
                    print 'MAIN: no urls to get, sleeping 500ms'
                sleep(0.5)
            else:
                print 'MAIN: get_next_url(): Exception: %s:\n%s' % (str(e), format_exc())
            continue
        # We have a URL to download & add to DB (url)
        already_printed_sleep_msg = False
        # Wait for thread count to drop
        while len(self.current_threads) >= MAX_THREADS:
            sleep(0.1)
        self.current_threads.append(None)
        # Create new thread to download the media, add to self.results
        print 'MAIN: %s #%d: launching handler for: %s' % (url['path'], url['i_index'], url['url'])
        # Create subdirs from main thread to avoid race condition
        dirname = path.join(ImageUtils.get_root(), 'rips', url['path'], 'thumbs')
        ImageUtils.create_subdirectories(dirname)
        args = (url,)
        t = Thread(target=self.retrieve_result_from_url, args=args)
        t.start()
def __init__(self):
    # Initialize the poller: enforce a single running instance, open the
    # global history log, and construct the DB/Reddit helpers.
    # Single file that all output is written to, to track usage
    self.exit_if_already_started()
    self.root_log = open(path.join(ImageUtils.get_root(), 'history.log'), 'a')
    self.logger = self.root_log # Logger used by helper classes
    self.db = DB() # Database instance
    self.reddit = Reddit()
    # Subreddits whose content should never be downloaded
    self.excluded_subs = self.db.get_excluded_subreddits()
def get_imageurls(self, url, number_of_images):
    """Collect up to `number_of_images` full-size image URLs from a Google
    Images results page.

    Opens the page in headless Chrome, scrolls to load thumbnails, then
    clicks each thumbnail and scrapes the full-resolution `src` from the
    preview pane.

    :param url: Google Images search results URL
    :param number_of_images: maximum number of URLs to collect
    :return: list of image URLs (may be shorter than requested)
    """
    image_urls = []
    image_obj = ImageUtils()
    chrome_options = webdriver.ChromeOptions()
    chrome_options.add_argument('--no-sandbox')
    chrome_options.add_argument('--headless')
    chrome_options.add_argument('--disable-dev-shm-usage')
    chrome_options.binary_location = os.environ.get("GOOGLE_CHROME_BIN")
    driver = webdriver.Chrome(
        executable_path=os.environ.get("CHROMEDRIVER_PATH"),
        chrome_options=chrome_options)
    try:
        driver.get(url)
        # Scroll to the bottom so lazy-loaded thumbnails are present
        image_obj.scroll_to_bottom(driver)
        thumbnail_list = driver.find_elements_by_css_selector('img.Q4LuWd')
        print(len(thumbnail_list))
        for image in thumbnail_list:
            # Stop *before* clicking another thumbnail once we have enough
            # (previously the quota check ran after the click+sleep).
            if len(image_urls) >= number_of_images:
                break
            image.click()
            time.sleep(0.5)  # give the preview pane time to load
            for candidate in driver.find_elements_by_css_selector('img.n3VNCb'):
                try:
                    src = candidate.get_attribute('src')
                    # Only accept real remote URLs (skips data: placeholders)
                    if src and 'http' in src:
                        print(src)
                        if src not in image_urls:
                            image_urls.append(src)
                        break
                except Exception:
                    # Element may have gone stale; try the next candidate.
                    # (Narrowed from a bare `except:`.)
                    continue
        print("number-", len(image_urls))
        return image_urls
    finally:
        # Always shut the browser down; previously the WebDriver process
        # was leaked on every call.
        driver.quit()
def per_step(img, e, s):
    # Progress callback: render content / current / style images side by side
    # for epoch `e`, step `s` of the style-transfer optimization.
    plt.subplot(1, 3, 1)
    ImageUtils.imshow(transferer.content_image, 'Content Image')
    plt.subplot(1, 3, 2)
    ImageUtils.imshow(img, f'New Image, epoch {e}, step {s}')
    plt.subplot(1, 3, 3)
    ImageUtils.imshow(transferer.style_image, 'Style Image')
    ImageUtils.flashplot()
    fig = plt.figure(1)
    fig.clf()
    # NOTE(review): `ax` is never used after this; presumably leftover from
    # an earlier plotting layout -- confirm before removing.
    ax = fig.subplots(nrows=2, ncols=1)
def __init__(self, content_path, style_path):
    """Load the content/style images and the VGG19 feature extractor.

    :param content_path: path of the content image
    :param style_path: path of the style image
    """
    # Source images for the transfer
    self.content_image = ImageUtils.grab_image(content_path)
    self.style_image = ImageUtils.grab_image(style_path)
    # Pre-trained VGG19 (no classifier head) supplies the feature maps
    self.vgg = tf.keras.applications.VGG19(include_top=False,
                                           weights='imagenet')
    # Layers whose activations define the content / style losses
    self.content_layers = ['block5_conv2']
    self.style_layers = ['block1_conv1', 'block2_conv1', 'block3_conv1',
                         'block4_conv1', 'block5_conv1']
    # Define a tf.Variable to contain the image to optimize.
    self.updateImage()
def delete_album(self, cursor, rowid, path):
    """Remove an album entirely: media rows, queued URLs, the album
    record, and its directory tree on disk.
    """
    # Drop every media row belonging to this album
    cursor.execute('delete from medias where album_id = ?', [rowid])
    # Drop any URLs still queued for this album
    cursor.execute('delete from urls where album_id = ?', [rowid])
    # Drop the album record itself (keyed by path)
    cursor.execute('delete from albums where path = ?', [path])
    # Finally remove the album directory and all its files
    album_dir = ospath.join(ImageUtils.get_root(), path)
    rmtree(album_dir)
def process_url(self, url, url_index, child):
    # Download the media behind `url` (the `url_index`-th link found in
    # `child`, a Post or Comment) into the author's content directory.
    self.debug('process_url: %s' % url)
    userid = self.db.get_user_id(child.author)
    # Build the base filename from the post (and comment) ids
    # NOTE(review): if `child` is neither Post nor Comment, base_fname /
    # postid / commid are left unbound -- confirm callers only pass these.
    if type(child) == Post:
        base_fname = '%s-%d' % (child.id, url_index)
        postid = child.id
        commid = None
    elif type(child) == Comment:
        base_fname = '%s-%s-%d' % (child.post_id, child.id, url_index)
        postid = child.post_id
        commid = child.id
    working_dir = path.join(ImageUtils.get_root(), 'content', child.author)
    # A single URL can contain multiple medias (i.e. albums)
    try:
        (media_type, albumname, medias) = ImageUtils.get_urls(url)
    except Exception, e:
        self.debug('%s: process_url: unable to get URLs for %s: %s' % (child.author, url, str(e)))
        return
def remove_user(self, user):
    # Delete a user: all DB rows referencing them and their content dir.
    userid = self.get_user_id(user)
    # Re-read the stored username (canonical form) from the users table
    user = self.select_one('username', 'users', where='id = ?', values=[userid])
    self.delete('posts', 'userid = ?', [userid])
    self.delete('comments', 'userid = ?', [userid])
    self.delete('albums', 'userid = ?', [userid])
    # 'like' makes the username match case-insensitively
    self.delete('users', 'username like ?', [user])
    self.delete('newusers', 'username like ?', [user])
    # Remove the user's files last, once the DB no longer references them
    dirpath = path.join(ImageUtils.get_root(), 'content', user)
    if path.exists(dirpath):
        rmtree(dirpath)
class Ambilight:
    """Samples the screen borders and computes one mean color per LED."""

    def __init__(self, led_num, border):
        # Number of LEDs on the strip and border thickness (pixels) to sample
        self.NUMLED = led_num
        self.BORDER = border
        self.imageUtils = ImageUtils()
        self.currentColors = collections.OrderedDict()
        self.current_image_all = None

    def run(self):
        """Grab the screen corners and refresh the per-LED color map."""
        corner_images = self.imageUtils.makeImagesOfCorners(self.BORDER)
        self.current_image_all = self.imageUtils.concatStripe(corner_images)
        self.currentColors = self.__calcColors()

    def __save(self, im, name):
        # Debug helper: dump an intermediate image as PNG
        im.save(name, "PNG")

    def __calcColors(self):
        """Return an OrderedDict mapping LED index -> mean color of its chunk."""
        colors = collections.OrderedDict()
        chunks = self.imageUtils.splitImageIntoChunks(self.current_image_all,
                                                      self.NUMLED)
        for index, chunk in enumerate(chunks):
            colors[index] = ImageStat.Stat(chunk)._getmean()
        return colors
def get_posters(self, file):
    """Download HD posters for every movie listed (one per line) in `file`.

    Posters are saved as ../res/hd_posters/<line-number>.jpg. Failures for
    individual movies are reported and skipped; a missing/unreadable input
    file aborts the program.
    """
    imageUtils = ImageUtils()
    image_path = '../res/hd_posters/{}.jpg'
    try:
        movie_list = open(file, 'r', encoding='utf-8')
    except OSError:
        # Narrowed from a bare `except:`: only file-open errors should abort
        print('Could Not Open File: ', file)
        sys.exit()
    with movie_list:
        print('Reading From File: ', file)
        count = 0
        for movie in movie_list:
            #Assume that the movie only exists once
            try:
                movie_url = self.search(movie)
                image_url = self.get_image_url(movie_url)
                image_src = self.get_image_src(image_url)
                imageUtils.write_image_to_file(image_src, image_path.format(count))
                count = count + 1
            except Exception as e:
                # Previously a silent `except: pass`; report and continue,
                # matching get_poster()'s error handling.
                print('ERROR: Failed to find poster for: ', movie)
                print(e)
def add_existing_album(self, user, oldalbum, oldpath):
    # Import a pre-existing album directory into the user's content area,
    # creating the album DB record from ids encoded in the legacy name.
    newalbum = path.join(ImageUtils.get_root(), 'content', user, oldalbum)
    if path.exists(newalbum):
        self.debug('album already exists: %s' % newalbum)
        return
    # Recover post id, optional comment id, and imgur album id
    (post, comment, imgid) = self.get_post_comment_id(oldalbum)
    url = 'http://imgur.com/a/%s' % imgid
    try:
        album_id = self.add_album(newalbum, user, url, post, comment)
    except Exception, e:
        self.debug('add_existing_album: failed: %s' % str(e))
        return
def start(self):
    # Main loop: drain downloaded results, fetch the next queued URL, and
    # spawn downloader threads bounded by MAX_THREADS.
    print "MAIN: starting infinite loop..."
    already_printed_sleep_msg = False
    while True:
        sleep(0.1)
        if len(self.results) > 0:
            # self.results is the list of downloaded medias to be added to the DB
            result = self.results.pop()
            self.handle_result(result)
        try:
            # Get next URL to retrieve
            url = self.get_next_url()
        except Exception, e:
            if str(e) == "no URLs found":
                # Print the idle message only once per idle period
                if not already_printed_sleep_msg:
                    already_printed_sleep_msg = True
                    print "MAIN: no urls to get, sleeping 500ms"
                sleep(0.5)
            else:
                print "MAIN: get_next_url(): Exception: %s:\n%s" % (str(e), format_exc())
            continue
        # We have a URL to download & add to DB (url)
        already_printed_sleep_msg = False
        # Wait for thread count to drop
        while len(self.current_threads) >= MAX_THREADS:
            sleep(0.1)
        self.current_threads.append(None)
        # Create new thread to download the media, add to self.results
        print "MAIN: %s #%d: launching handler for: %s" % (url["path"], url["i_index"], url["url"])
        # Create subdirs from main thread to avoid race condition
        dirname = path.join(ImageUtils.get_root(), "rips", url["path"], "thumbs")
        ImageUtils.create_subdirectories(dirname)
        args = (url,)
        t = Thread(target=self.retrieve_result_from_url, args=args)
        t.start()
def process_url(self, url, url_index, child): self.debug('%s: process_url: %s' % (child.author, url)) # Ignore duplicate albums if self.db.album_exists(url): self.debug('''%s: process_url: album %s already exists in database. Permalink: %s Object: %s''' % (child.author, url, child.permalink(), str(child))) return userid = self.db.get_user_id(child.author) if type(child) == Post: base_fname = '%s-%d' % (child.id, url_index) postid = child.id commid = None elif type(child) == Comment: base_fname = '%s-%s-%d' % (child.post_id, child.id, url_index) postid = child.post_id commid = child.id working_dir = path.join(ImageUtils.get_root(), 'content', child.author) # A single URL can contain multiple medias (i.e. albums) try: (media_type, albumname, medias) = ImageUtils.get_urls(url) except Exception, e: self.debug('%s: process_url: unable to get URLs for %s: %s' % (child.author, url, str(e))) if 'domain not supported' in str(e): # Save domain-not-supported URLs to new file user_dir = path.join(ImageUtils.get_root(), 'content', child.author) f = open(path.join(user_dir, 'unsupported.txt'), 'a') f.write(url) f.write('\n') f.flush() f.close() return
def __init__(self):
    """Start the poller: enforce single instance, set up logging and helpers."""
    # Single file that all output is written to, to track usage
    self.exit_if_already_started()
    self.db = DB() # Database instance
    # Route the root log to a real file, or to the null device when the
    # configured log level is 'none'
    if self.db.get_config('log_level', default='user') == 'none':
        self.root_log = open(devnull, 'w')
    else:
        self.root_log = open(path.join(ImageUtils.get_root(), 'history.log'), 'a')
    # Logger used by helper classes
    self.logger = self.root_log
    self.reddit = Reddit()
    self.excluded_subs = self.db.get_excluded_subreddits()
def poll_user(self, user):
    # Fetch new posts/comments for `user` since the last recorded id.
    # Create directories if needed
    user_dir = path.join(ImageUtils.get_root(), 'content', user)
    ImageUtils.create_subdirectories(user_dir)
    # Setup logger (per-user history log, shared with helper classes)
    self.logger = open(path.join(user_dir, 'history.log'), 'a')
    self.db.logger = self.logger
    ImageUtils.logger = self.logger
    self.reddit.logger = self.logger
    since_id = self.db.get_last_since_id(user)
    # Get posts/comments for user
    self.debug('%s: poll_user: since "%s"' % (user, since_id))
    try:
        children = self.reddit.get_user(user, since=since_id)
    except Exception, e:
        if '404: Not Found' in str(e):
            # User is deleted, mark it as such
            self.debug('%s: poll_user: user is 404, marking as deleted' % user)
            self.db.mark_as_deleted(user)
            return
        # Any other error: log and give up on this user for now
        self.debug('%s: poll_user: error %s' % (user, str(e)))
        return
def add_existing_album(self, user, oldalbum, oldpath):
    """Import a legacy album directory into the user's content area.

    Creates the album record, imports each image, and back-fills a
    synthetic Post (and Comment, when the album came from a comment) so
    the album is reachable through the normal DB queries.
    """
    newalbum = path.join(ImageUtils.get_root(), 'content', user, oldalbum)
    if path.exists(newalbum):
        self.debug('album already exists: %s' % newalbum)
        return
    # Legacy album names encode post id, optional comment id, imgur id
    (post, comment, imgid) = self.get_post_comment_id(oldalbum)
    url = 'http://imgur.com/a/%s' % imgid
    try:
        album_id = self.add_album(newalbum, user, url, post, comment)
    except Exception as e:
        self.debug('add_existing_album: failed: %s' % str(e))
        return
    for image in listdir(oldpath):
        self.debug('add_existing_album: image=%s' % path.join(oldpath, image))
        # Rebuild the canonical image name: <post>[-<comment>]_<suffix>
        fakeimage = post
        if comment is not None:
            fakeimage = '%s-%s' % (fakeimage, comment)
        fakeimage = '%s_%s' % (fakeimage, image.split('_')[-1])
        self.add_existing_image(user, fakeimage, path.join(oldpath, image), subdir=oldalbum, album_id=album_id)
    # Add post
    p = Post()
    p.id = post
    p.author = user
    if comment is None:
        p.url = url
    p.created = path.getctime(oldpath)
    p.subreddit = ''
    p.title = ''
    try:
        self.add_post(p, legacy=1)
    except Exception as e:
        # Post may already exist; that's fine for a legacy import
        pass
    # Add comment
    if comment is not None:
        c = Comment()
        c.id = comment
        c.post_id = post
        c.author = user
        c.body = url
        # BUG FIX: previously this branch re-assigned p.created, leaving
        # the comment's creation time unset; the comment is what is being
        # built here.
        c.created = path.getctime(oldpath)
        try:
            self.add_comment(c, legacy=1)
        except Exception as e:
            # Comment may already exist; ignore like add_post above
            pass
def __init__(self):
    # Initialize: enforce single instance, open the global log, construct
    # DB/Reddit helpers, and attempt a best-effort reddit login.
    # Single file that all output is written to, to track usage
    self.exit_if_already_started()
    self.root_log = open(path.join(ImageUtils.get_root(), 'history.log'), 'a')
    self.logger = self.root_log # Logger used by helper classes
    self.db = DB() # Database instance
    self.reddit = Reddit()
    # Login failures are logged but non-fatal: polling can proceed anonymously
    try:
        (username, password) = self.db.get_credentials('reddit')
        try:
            self.reddit.login(username, password)
        except Exception, e:
            self.debug('__init__: failed to login to reddit: %s' % str(e))
    except Exception, e:
        self.debug('__init__: failed to get reddit credentials: %s' % str(e))
def __init__(self):
    """Set up the poller singleton, its root log, and helper objects."""
    # Refuse to run twice; a single history log tracks all usage
    self.exit_if_already_started()
    self.db = DB() # Database instance
    log_level = self.db.get_config('log_level', default='user')
    # 'none' silences the root log by pointing it at the null device
    if log_level == 'none':
        self.root_log = open(devnull, 'w')
    else:
        history_path = path.join(ImageUtils.get_root(), 'history.log')
        self.root_log = open(history_path, 'a')
    self.logger = self.root_log # Logger used by helper classes
    self.reddit = Reddit()
    self.excluded_subs = self.db.get_excluded_subreddits()
def train_step(self, image, extractor, loss_function, opt, total_variation_weight=0):
    """Run one optimization step, mutating `image` in place.

    Computes the style-transfer loss (plus an optional total-variation
    penalty), applies the gradient through `opt`, and clips the result
    back into the valid [0, 1] range.
    """
    with tf.GradientTape() as tape:
        features = extractor(image)
        total_loss = loss_function(features)
        # Total-variation term discourages high-frequency noise
        total_loss = total_loss + total_variation_weight * tf.image.total_variation(image)
    gradient = tape.gradient(total_loss, image)
    opt.apply_gradients([(gradient, image)])
    # Keep pixel values in the displayable range
    image.assign(ImageUtils.clip_0_1(image))
def retrieve_result_from_url(self, url):
    # Worker-thread entry: download one media item described by `url`
    # (a dict row from the urls table) and append a result record to
    # self.results for the main loop to persist.
    # url contains album_id, index, url, type, path, and saveas
    # TODO logging into dirname/log.txt
    # Construct base result
    result = {
        'album_id' : url['album_id'],
        'i_index' : url['i_index'],
        'url' : url['url'],
        'valid' : 0, # set to 1 once downloaded successfully
        'error' : None, # populated on failure
        'type' : url['type'],
        'image_name': url['saveas'],
        'filesize' : 0, # filled in after download
        'width' : 0, # image dimensions, filled in after download
        'height' : 0, #
        'thumb_name': None, # thumbnail info, filled in after creation
        't_width' : 0, #
        't_height' : 0, #
        'metadata' : url['metadata'],
        'path' : url['path']
    }
    # Get save directory
    dirname = path.join(ImageUtils.get_root(), 'rips', url['path'])
    # Generate save path
    saveas = path.join(dirname, url['saveas'])
    if path.exists(saveas):
        print 'THREAD: %s: removing existing file %s' % (url['path'], saveas)
        remove(saveas)
    try:
        meta = self.httpy.get_meta(url['url'])
    except Exception, e:
        # Can't get meta? Can't get image!
        print 'THREAD: %s: failed to get_meta from %s: %s\n%s' % (url['path'], url['url'], str(e), format_exc())
        result['error'] = 'failed to get metadata from %s: %s\n%s' % (url['url'], str(e), format_exc())
        # Report failure back to the main loop and release this thread slot
        self.to_remove.append( (url['album_id'], url['i_index'] ) )
        self.results.append(result)
        self.current_threads.pop()
        return
def backfill_videos():
    """Regenerate PNG thumbnails for videos whose thumbs are still video files.

    Finds 'video' rows whose thumbnail path ends in .mp4/.flv/.wmv, creates
    a real .png thumbnail, updates the row, and deletes the old file.
    """
    query = '''
        select id, path, thumb
        from images
        where type = 'video'
          and ( thumb like '%.mp4' or thumb like '%.flv' or thumb like '%.wmv' )
    '''
    cur = db.conn.cursor()
    for imgid, image, oldthumb in cur.execute(query).fetchall():
        # Swap the video extension for .png
        saveas = '%s.png' % oldthumb[:oldthumb.rfind('.')]
        try:
            newthumb = ImageUtils.create_thumbnail(image, saveas)
        except Exception as e:
            print('ERROR: %s' % str(e))
            continue
        print('replacing %s with %s' % (oldthumb, newthumb))
        q = '''
            update images
            set thumb = ?
            where id = ?
        '''
        cur.execute(q, (newthumb, imgid))
        db.commit()
        # BUG FIX: a 2-to-3 leftover `print(...),` built a throwaway tuple
        # instead of suppressing the newline; use end='' for the original
        # "removing ... removed" single-line output.
        print('removing %s...' % oldthumb, end='')
        osremove(oldthumb)
        print('removed')
    cur.close()
def backfill_videos():
    # Regenerate PNG thumbnails for videos whose thumbs are still raw
    # video files (.mp4/.flv/.wmv): create the .png, update the DB row,
    # and delete the obsolete thumbnail file.
    query = '''
        select id, path, thumb
        from images
        where type = 'video'
          and ( thumb like '%.mp4' or thumb like '%.flv' or thumb like '%.wmv' )
    '''
    cur = db.conn.cursor()
    for imgid, image, oldthumb in cur.execute(query).fetchall():
        # Swap the video extension for .png
        saveas = oldthumb
        saveas = '%s.png' % saveas[:saveas.rfind('.')]
        try:
            newthumb = ImageUtils.create_thumbnail(image, saveas)
        except Exception, e:
            print 'ERROR: %s' % str(e)
            continue
        print 'replacing %s with %s' % (oldthumb, newthumb)
        q = '''
            update images
            set thumb = ?
            where id = ?
        '''
        cur.execute(q, (newthumb, imgid))
        db.commit()
        # Trailing comma keeps "removing ..." and "removed" on one line
        print 'removing %s...' % oldthumb,
        osremove(oldthumb)
        print 'removed'
        # NOTE(review): unlike the py3 variant of this function, the cursor
        # is never closed here -- confirm whether that's intentional.
#!/usr/bin/python from os import listdir, path, walk from DB import DB from ImageUtils import ImageUtils db = DB() root = ImageUtils.get_root() ''' Iterates over existing sets, adds sets to database, attempts to populate DB with information based on filenames: * URL (http://i.imgur.com/<image> * Post ID * Comment ID * Creation time Copies existing set to new directory (/content/), Generates new thumbnails for the sets ''' def populate_db(): for user in listdir(path.join(root, 'users')): userdir = path.join(root, 'users', user) if not path.isdir(userdir): continue for item in listdir(userdir): itempath = path.join(userdir, item) if path.isfile(itempath): # Image #print "image: %s" % itempath db.add_existing_image(user, item, itempath) elif path.isdir(itempath):
# Album! albumname = '%s-%s' % (base_fname, albumname) working_dir = path.join(working_dir, albumname) #self.debug('%s: process_url: adding album to database' % child.author) album_id = self.db.add_album( working_dir, child.author, url, postid, commid, ) else: album_id = None if self.db.get_config('save_thumbnails', default='true') == 'true': ImageUtils.create_subdirectories(path.join(working_dir, 'thumbs')) else: ImageUtils.create_subdirectories(working_dir) for media_index, media in enumerate(medias): # Construct save path: /user/post[-comment]-index-filename fname = ImageUtils.get_filename_from_url(media, media_type) fname = '%s-%02d-%s' % (base_fname, media_index, fname) saveas = path.join(working_dir, fname) # Download URL try: self.debug('%s: process_url: downloading #%d %s' % (child.author, media_index + 1, media)) headers = { 'Referer' : url }
def start(self):
    '''
        Overriding SiteBase's start() method for unique ripping logic.
        Locates the archived gonewild user behind self.url, copies their
        images into the rip directory, creates thumbnails, and accumulates
        per-image rows (insertmany) for insertion into the medias table.
    '''
    # We need a lot of libraries
    from ImageUtils import ImageUtils
    from calendar import timegm
    from shutil import copy2, rmtree
    from time import gmtime
    from os import path, walk, environ, getcwd
    from json import loads
    savedir = path.join('rips', self.path)
    if getcwd().endswith('py'):
        savedir = path.join('..', savedir)
    if self.album_exists:
        # Don't re-rip an album. Return info about existing album.
        return {
            'warning' : 'album already exists',
            'album_id' : self.album_id,
            'album' : self.album_name,
            'url' : self.url,
            'host' : self.get_host(),
            'path' : self.path,
            'count' : self.db.count('medias', 'album_id = ?', [self.album_id]),
            'pending' : self.db.count('urls', 'album_id = ?', [self.album_id])
        }
    user = self.url.split(':')[-1]
    # Search for username (with proper case) on site
    gwapi = self.db.get_config('gw_api')
    if gwapi == None:
        raise Exception('unable to rip gonewild albums: gw_api is null')
    r = self.httpy.get('%s?method=search_user&user=%s' % (gwapi, user))
    json = loads(r)
    found = False
    for jsonuser in json['users']:
        if jsonuser.lower() == user.lower():
            found = True
            user = jsonuser
            break
    gwroot = self.db.get_config('gw_root')
    if gwroot == None:
        raise Exception('unable to rip gonewild albums: gw_root is null')
    userroot = path.join(gwroot, user)
    # Check if we can actually rip this user
    if not found or not path.exists(userroot):
        return { 'error' : 'unable to rip user (not archived)' }
    # Create subdirs
    ImageUtils.create_subdirectories(path.join(savedir, 'thumbs'))
    # Copy images to /rips/, get values that need to be inserted into db (insertmany)
    insertmany = []
    already_got = []
    filesize = 0
    for root, subdirs, files in walk(userroot):
        if root.endswith('thumbs'):
            continue
        for filename in sorted(files):
            f = path.join(root, filename)
            n = filename
            if not root.endswith(userroot):
                # It's a subidr, save the file accordingly
                n = '%s_%s' % (root[root.rfind('/')+1:], filename)
            # Avoid duplicates
            no_post = n[n.rfind('_')+1:]
            if no_post in already_got:
                continue
            already_got.append(no_post)
            # Prefix with a 1-based, zero-padded index
            n = '%03d_%s' % (len(insertmany) + 1, n)
            saveas = path.join(savedir, n)
            # Copy & get size
            try:
                copy2(f, saveas)
                (width, height) = ImageUtils.get_dimensions(saveas)
            except Exception, e:
                # image can't be parsed, probably corrupt. move on.
                continue
            # Create thumbnail
            tsaveas = path.join(savedir, 'thumbs', n)
            try:
                (tsaveas, twidth, theight) = ImageUtils.create_thumbnail(saveas, tsaveas)
            except Exception, e:
                # Failed to create thumb; fall back to the placeholder image
                tsaveas = '/'.join(['ui', 'images', 'nothumb.png'])
                twidth = theight = 160
            filesize += path.getsize(saveas)
            # Add to list of values to insert into DB
            # NOTE(review): the code that inserts `insertmany` into the DB is
            # not visible in this span -- the block appears truncated here.
            insertmany.append( [
                self.album_id, # album_id, currently None
                len(insertmany) + 1, # i_index
                '', # url TODO
                1, # valid
                None, # error
                SiteBase.get_type(saveas), # type
                n, # image_name
                width, # img width
                height, # img height
                path.getsize(saveas), # filesize
                path.basename(tsaveas), # thumb_name
                twidth, # thumb width
                theight, # thumb height
                None # metadata
            ] )
class Detector(object):
    # Lane-line detector: warps a binary road image to a top-down view,
    # searches for left/right lane pixels with sliding windows, fits
    # quadratics, and annotates the original frame with lane, offset and
    # curvature information.
    def __init__(self):
        self.__leftLine, self.__rightLine = Line(LineType.left), Line(LineType.right)
        self.__nwindows = 8
        # Set the width of the windows +/- margin
        self.__margin = 45
        # Set minimum number of pixels found to recenter window
        self.__minpix = 50
        self.__imageUtils = ImageUtils()

    def detect(self, binary_image, plot=False):
        # Full per-frame pipeline: perspective warp -> color filter ->
        # line search -> polynomial fit -> draw overlay + stats.
        warped_result = self.__imageUtils.perspective(binary_image)
        result = self.__imageUtils.luv_lab_filter(warped_result)
        self.__set_binary_image(result)
        # Re-use the previous fit when available, else search from scratch
        if self.__leftLine.detected:
            self.quick_search(self.__leftLine)
        else:
            self.blind_search(self.__leftLine, plot)
        if self.__rightLine.detected:
            self.quick_search(self.__rightLine)
        else:
            self.blind_search(self.__rightLine, plot)
        left_fitx, left_fity = self.get_fit(self.__leftLine)
        right_fitx, right_fity = self.get_fit(self.__rightLine)
        result = self.__imageUtils.draw_on_origin_image(binary_image, left_fitx, right_fitx, left_fity, right_fity, plot)
        offset, mean_curv = self.car_pos()
        result = self.__imageUtils.wirte_on_processed_image(result, offset, mean_curv)
        return result

    def quick_search(self, line):
        """
        Assuming in last frame, lane has been detected. Based on last x/y coordinates, quick search current lane.
        https://github.com/uranus4ever/Advanced-Lane-Detection/blob/master/Project.py
        """
        allx = []
        ally = []
        if line.detected:
            # Slide a 90px-tall band from the bottom of the frame upward,
            # collecting pixels within +/-50px of the previous fit.
            win_bottom = 720
            win_top = 630
            while win_top >= 0:
                yval = np.mean([win_top, win_bottom])
                xval = (np.median(line.current_fit[0])) * yval**2 + (np.median(line.current_fit[1])) * yval + (np.median(line.current_fit[2]))
                x_idx = np.where((((xval - 50) < self.__nonzerox) & (self.__nonzerox < (xval + 50)) & ((self.__nonzeroy > win_top) & (self.__nonzeroy < win_bottom))))
                x_window, y_window = self.__nonzerox[x_idx], self.__nonzeroy[x_idx]
                if np.sum(x_window) != 0:
                    # NOTE(review): np.append returns a new array and does
                    # not mutate allx/ally, so these results are discarded
                    # -- confirm whether list.extend was intended.
                    np.append(allx, x_window)
                    np.append(ally, y_window)
                win_top -= 90
                win_bottom -= 90
        line.allx = allx
        line.ally = ally
        if np.sum(allx) == 0:
            # NOTE(review): sets self.detected, not line.detected -- confirm.
            self.detected = False # If no lane pixels were detected then perform blind search

    def blind_search(self, line, debug=False):
        # Histogram-guided sliding-window search used when no previous fit
        # exists: start from the strongest column, then walk upward window
        # by window re-centering on local histogram peaks.
        allx = []
        ally = []
        base, window_bottom, window_top = self.__get_base(line.lineType)
        window_x_high, window_x_low = self.__get_x_low_high(base)
        x_idx, x_window, y_window = self.__get_xy_window(window_bottom, window_top, window_x_high, window_x_low)
        if debug:
            print(base, window_bottom, window_top, window_x_low, window_x_high)
            cv2.rectangle(self.__binary_image, (window_x_low, window_top), (window_x_high, window_bottom), (0, 255, 0), 2)
        if np.sum(x_window) != 0:
            allx.extend(x_window)
            ally.extend(y_window)
            if len(x_idx[0]) > self.__minpix:
                # Enough pixels found: re-center the next window on them
                base = np.int(np.mean(x_window))
        for window in range(1, self.__nwindows):
            window_bottom = window_top
            window_top = window_top - self.__window_height
            histogram = np.sum(self.__binary_image[window_top:window_bottom, :], axis=0)
            # Only search within +/-100px of the current base
            search_high = min(base + 100, 1280)
            search_low = max(base - 100, 0)
            x_move = np.argmax(histogram[search_low:search_high])
            base = x_move if x_move > 0 else (search_high - search_low) // 2
            base += search_low
            window_x_high, window_x_low = self.__get_x_low_high(base)
            x_idx, x_window, y_window = self.__get_xy_window(window_bottom, window_top, window_x_high, window_x_low)
            if np.sum(x_window) != 0:
                allx.extend(x_window)
                ally.extend(y_window)
                if len(x_idx[0]) > self.__minpix:
                    base = np.int(np.mean(x_window))
        if np.sum(allx) > 0:
            # NOTE(review): sets self.detected, not line.detected -- confirm.
            self.detected = True
        line.allx = allx
        line.ally = ally

    def get_fit(self, line):
        # Fit a quadratic to the line's pixels, smooth the endpoints with a
        # running median, and return the smoothed x/y curve to draw.
        line.current_fit = np.polyfit(line.ally, line.allx, 2)
        line.current_bottom_x = line.current_fit[0] * 720**2 + line.current_fit[1] * 720 + line.current_fit[2]
        line.current_top_x = line.current_fit[2]
        line.bottom_x.append(line.current_bottom_x)
        #print("\nline.bottom_x = ", line.bottom_x);
        line.current_bottom_x = np.median(line.bottom_x)
        line.top_x.append(line.current_top_x)
        line.current_top_x = np.median(line.top_x)
        # Anchor the fit at the frame's top and bottom rows
        line.allx = np.append(line.allx, line.current_bottom_x)
        line.ally = np.append(line.ally, 720)
        line.allx = np.append(line.allx, line.current_top_x)
        line.ally = np.append(line.ally, 0)
        #print(line.lineType, " ", line.allx, " ", line.ally)
        sorted_idx = np.argsort(line.ally)
        #print(sorted_idx)
        line.allx = line.allx[sorted_idx]
        line.ally = line.ally[sorted_idx]
        # Re-fit after anchoring, then smooth coefficients over history
        line.current_fit = np.polyfit(line.ally, line.allx, 2)
        line.A.append(line.current_fit[0])
        line.B.append(line.current_fit[1])
        line.C.append(line.current_fit[2])
        line.fity = line.ally
        line.current_fit = [np.median(line.A), np.median(line.B), np.median(line.C)]
        line.fitx = line.current_fit[0] * line.fity**2 + line.current_fit[1] * line.fity + line.current_fit[2]
        return line.fitx, line.fity

    def curvature(self, line):
        """
        calculate curvature from fit parameter
        :param fit: [A, B, C]
        :return: radius of curvature (in meters unit)
        https://github.com/uranus4ever/Advanced-Lane-Detection/blob/master/Project.py
        """
        ym_per_pix = 18 / 720 # meters per pixel in y dimension
        xm_per_pix = 3.7 / 700 # meters per pixel in x dimension
        fitx = line.current_fit[0] * self.__ploty**2 + line.current_fit[1] * self.__ploty + line.current_fit[2]
        y_eval = np.max(self.__ploty)
        # Fit new polynomials to x,y in world space
        fit_cr = np.polyfit(self.__ploty * ym_per_pix, fitx * xm_per_pix, 2)
        curved = ((1 + (2 * fit_cr[0] * y_eval * ym_per_pix + fit_cr[1]) ** 2) ** 1.5) / np.absolute(2 * fit_cr[0])
        return curved

    def car_pos(self):
        """
        Calculate the position of car on left and right lane base (convert to real unit meter)
        :return: distance (meters) of car offset from the middle of left and right lane,
                 and the mean radius of curvature of both lanes
        https://github.com/uranus4ever/Advanced-Lane-Detection/blob/master/Project.py
        """
        xleft_eval = self.__get_eval(self.__leftLine)
        xright_eval = self.__get_eval(self.__rightLine)
        ym_per_pix = 18 / 720 # meters per pixel in y dimension
        xm_per_pix = 3.7 / abs(xleft_eval - xright_eval) # meters per pixel in x dimension
        xmean = np.mean((xleft_eval, xright_eval))
        offset = (self.__binary_image.shape[1] / 2 - xmean) * xm_per_pix # +: car in right; -: car in left side
        left_curved = self.__get_curved(self.__leftLine.current_fit, xm_per_pix, ym_per_pix)
        right_curved = self.__get_curved(self.__rightLine.current_fit, xm_per_pix, ym_per_pix)
        mean_curv = np.mean([left_curved, right_curved])
        return offset, mean_curv

    def __get_eval(self, line):
        # Evaluate the line's fit at the bottom of the frame
        return line.current_fit[0] * np.max(self.__ploty)**2 + line.current_fit[1] * np.max(self.__ploty) + line.current_fit[2]

    def __get_curved(self, fit, xm_per_pix, ym_per_pix):
        # Radius of curvature of `fit` at the frame bottom, in meters
        y_eval = np.max(self.__ploty)
        fitx = fit[0] * self.__ploty**2 + fit[1] * self.__ploty + fit[2]
        fit_cr = np.polyfit(self.__ploty * ym_per_pix, fitx * xm_per_pix, 2)
        curverad = ((1 + (2 * fit_cr[0] * y_eval * ym_per_pix + fit_cr[1]) ** 2) ** 1.5) / np.absolute(2 * fit_cr[0])
        return curverad

    def __get_x_low_high(self, base):
        # Clamp the search window's x-range to the frame width
        window_x_low = max(base - self.__margin, 0)
        window_x_high = min(base + self.__margin, 1280)
        return window_x_high, window_x_low

    def __get_xy_window(self, window_bottom, window_top, window_x_high, window_x_low):
        # Indices and coordinates of nonzero pixels inside the window
        x_idx = np.where(((window_x_low < self.__nonzerox) & (self.__nonzerox < window_x_high) & ((self.__nonzeroy > window_top) & (self.__nonzeroy < window_bottom))))
        x_window, y_window = self.__nonzerox[x_idx], self.__nonzeroy[x_idx]
        return x_idx, x_window, y_window

    def __get_base(self, lineType):
        # Pick the starting x-column for a blind search: the strongest
        # histogram column in the bottom window, falling back to the
        # whole-image histogram when the bottom window is empty.
        small_window_bottom = self.__binary_image.shape[0]
        small_window_top = self.__binary_image.shape[0] - self.__window_height
        small_window_histogram = np.sum(self.__binary_image[small_window_top:small_window_bottom, :], axis=0)
        all_histogram = np.sum(self.__binary_image[200:, :], axis=0)
        if lineType == LineType.right:
            base = (np.argmax(small_window_histogram[self.__midpoint:-60]) + self.__midpoint) \
                if np.argmax(small_window_histogram[self.__midpoint:-60]) > 0 \
                else (np.argmax(all_histogram[self.__midpoint:]) + self.__midpoint)
        else:
            base = np.argmax(small_window_histogram[:self.__midpoint]) \
                if np.argmax(small_window_histogram[:self.__midpoint]) > 0 \
                else np.argmax(all_histogram[:self.__midpoint])
        return base, small_window_bottom, small_window_top

    def __set_binary_image(self, binary_image):
        # Cache the warped binary image plus derived values used everywhere:
        # nonzero pixel coordinates, window height, midpoint, and y samples.
        self.__binary_image = binary_image
        nonzero = self.__binary_image.nonzero()
        self.__nonzeroy = np.array(nonzero[0])
        self.__nonzerox = np.array(nonzero[1])
        # NOTE(review): np.int is removed in NumPy >= 1.24; int() would be
        # the safe replacement.
        self.__window_height = np.int(self.__binary_image.shape[0] / self.__nwindows)
        self.__midpoint = np.int(self.__binary_image.shape[1] / 2)
        self.__ploty = np.linspace(0, self.__binary_image.shape[0] - 1, self.__binary_image.shape[0])
class ImageUtilsTest(unittest.TestCase):
    """Unit tests for the ImageUtils corner/concat/chunk helpers.

    Requires test1.png and test2.png in the working directory.
    """

    def setUp(self):
        self.image1 = Image.open('test1.png')
        # test2.png is 200px wide and 220px high
        self.image2 = Image.open('test2.png')
        self.imageUtils = ImageUtils()

    def test_makeImagesOfCorners_numbe_of_elements(self):
        images = self.imageUtils.makeImagesOfCorners(50)
        self.assertEqual(4, len(images))

    def test_makeImagesOfCorners_size(self):
        border = 50
        top, right, bottom, left = self.imageUtils.makeImagesOfCorners(border)
        screen_w, screen_h = self.imageUtils.getScreenSize()
        # Top/bottom strips span the screen width; left/right span its height.
        self.assertEqual(top.size, (screen_w, border))
        self.assertEqual(right.size, (border, screen_h))
        self.assertEqual(bottom.size, (screen_w, border))
        self.assertEqual(left.size, (border, screen_h))

    def test_makeImagesOfCorners_small_border(self):
        # Each invalid value gets its own context: the original put both
        # calls under one assertRaises, so the second was never executed.
        with self.assertRaises(ValueError):
            self.imageUtils.makeImagesOfCorners(0)
        with self.assertRaises(ValueError):
            self.imageUtils.makeImagesOfCorners(-1)

    def test_concat_too_few(self):
        with self.assertRaises(ValueError):
            self.imageUtils.concat(())
        with self.assertRaises(ValueError):
            # Fixed: the original passed (self.image1) -- a bare image, not a
            # one-element tuple.
            self.imageUtils.concat((self.image1,))

    def test_concat_proper_size(self):
        images = (self.image2, self.image2)
        concated = self.imageUtils.concat(images)
        width_expected = 200 + 200
        height_expected = 220
        width_actual, height_actual = concated.size
        self.assertEqual(width_expected, width_actual)
        self.assertEqual(height_expected, height_actual)

    def test_concat_stripe_proper_size(self):
        images = (self.image2, self.image2)
        concated = self.imageUtils.concatStripe(images)
        width_actual, height_actual = concated.size
        # TODO: expected dimensions for concatStripe (presumably rotated, so
        # (220 + 220) x 200) still need to be confirmed before asserting.
        # self.assertEqual(220 + 220, width_actual)
        # self.assertEqual(200, height_actual)

    def test_split_images_into_chunks_number_of_elements(self):
        count = 4
        chunks = self.imageUtils.splitImageIntoChunks(self.image2, count)
        self.assertEqual(count, len(chunks))

    def test_split_images_into_chunks_width(self):
        count = 4
        chunks = self.imageUtils.splitImageIntoChunks(self.image2, count)
        expected_width_per_chunk = self.image2.size[0] / count
        # Plain loop instead of a side-effect list comprehension.
        for image in chunks:
            self.assertEqual(expected_width_per_chunk, image.size[0])

    def test_split_images_into_chunks_width2(self):
        count = 4
        chunks = self.imageUtils.splitImageIntoChunks(self.image1, count)
        # Drop the last chunk, which may carry the rounding remainder.
        chunks.pop()
        # Removed [image.show() for image in chunks]: it opened an OS image
        # viewer per chunk on every test run.
        # NOTE(review): widths are compared against image2 although the
        # chunks come from image1 -- presumably both are 200px wide; verify.
        expected_width_per_chunk = self.image2.size[0] / count
        for image in chunks:
            self.assertEqual(expected_width_per_chunk, image.size[0])
'\n\t' + 'site text primary key, \n\t' + 'username text, \n\t' + 'password text \n\t', 'config' : '\n\t' + 'key text primary key, \n\t' + 'value text \n\t', 'friends' : '\n\t' + 'username text primary key\n\t', } DB_FILE = path.join(ImageUtils.get_root(), 'database.db') class DB: def __init__(self): self.logger = stderr if path.exists(DB_FILE): self.debug('__init__: using database file: %s' % DB_FILE) else: self.debug('__init__: database file (%s) not found, creating...' % DB_FILE) self.conn = None self.conn = sqlite3.connect(DB_FILE) #TODO CHANGE BACK, encoding='utf-8') self.conn.text_factory = lambda x: str(x, "utf-8", "ignore") # Don't create tables if not supplied. if SCHEMA != None and SCHEMA != {} and len(SCHEMA) > 0: # Create table for every schema given. for key in SCHEMA:
class Agent: # The default constructor for your Agent. Make sure to execute any # processing necessary before your Agent starts solving problems here. # # Do not add any variables to this signature; they will not be used by # main(). def __init__(self): pass # The primary method for solving incoming Raven's Progressive Matrices. # For each problem, your Agent's Solve() method will be called. At the # conclusion of Solve(), your Agent should return an integer representing its # answer to the question: "1", "2", "3", "4", "5", or "6". These integers # are also the Names of the individual RavensFigures, obtained through # RavensFigure.getName() (as Strings). # # In addition to returning your answer at the end of the method, your Agent # may also call problem.checkAnswer(int givenAnswer). The parameter # passed to checkAnswer should be your Agent's current guess for the # problem; checkAnswer will return the correct answer to the problem. This # allows your Agent to check its answer. Note, however, that after your # agent has called checkAnswer, it will *not* be able to change its answer. # checkAnswer is used to allow your Agent to learn from its incorrect # answers; however, your Agent cannot change the answer to a question it # has already answered. # # If your Agent calls checkAnswer during execution of Solve, the answer it # returns will be ignored; otherwise, the answer returned at the end of # Solve will be taken as your Agent's answer to this problem. # # Make sure to return your answer *as an integer* at the end of Solve(). # Returning your answer as a string may cause your program to crash. 
imageUtils = ImageUtils() doNotGuess = 0 def Solve(self, problem): # TODO: Implement voting print "problem name: " + problem.name if "Basic" not in problem.name: self.doNotGuess = 1 problem_figures = {} for figureName in problem.figures: figure = problem.figures[figureName] image = Image.open(figure.visualFilename).convert('1') problem_figures[figureName] = image strategy = self.chooseStrategy(problem_figures) if strategy == 'row_equals': for i in range(1, 9): if self.areEqual(problem_figures['H'], problem_figures[str(i)])[0]: return int(i) elif strategy == 'one_of_each': return self.applyOnfOfEachStrategy(problem_figures) elif strategy == 'one_cancels': return self.applyOneCancelsStrategy(problem_figures) elif strategy == 'cancel_out': return self.applyCancelOutStrategy(problem_figures) elif strategy == 'common_perms': return self.applyCommonPermsStrategy(problem_figures) elif strategy == 'productAB': return self.applyProductABStrategy(problem_figures) elif strategy == 'productAC': return self.applyProductACStrategy(problem_figures) elif strategy == 'diffAB': return self.applyDiffABStrategy(problem_figures) elif strategy == 'shared': return self.applySharedStrategy(problem_figures) else: return self.pick_the_one_not_seen(problem_figures) return -1 @staticmethod def areEqual(im1, im2): dif = sum(abs(p1 - p2) for p1, p2 in zip(im1.getdata(), im2.getdata())) ncomponents = im1.size[0] * im1.size[1] * 3 dist = (dif / 255.0 * 100) / ncomponents im1__getcolors = im1.getcolors() im2_getcolors = im2.getcolors() black1 = (10000, 0) if len(im1__getcolors) > 1: black, white = im1__getcolors else: if im1__getcolors[0][1] == 255: white = im1__getcolors black = (0, 0) else: black = im1__getcolors white = (0, 255) if len(im2_getcolors) > 1: black1, white1 = im2_getcolors else: if im2_getcolors[0][1] == 255: white1 = im2_getcolors black1 = (0, 0) else: black1 = im2_getcolors white1 = (0, 255) stats = {"dist": dist, "blk": abs(black[0] - black1[0])} return (dist < 1.1 and 
abs(black[0] - black1[0]) < 105), stats # return (dist<1.1 and abs(black[0]-black1[0])<105 and abs(white[0]-white1[0]<100)), stats def isShared(self, figures): sharedAB = self.imageUtils.compareImages(figures["A"], figures["B"])[0] sharedDE = self.imageUtils.compareImages(figures["D"], figures["E"])[0] return self.areEqual(sharedAB, figures["C"])[0] and self.areEqual( sharedDE, figures["F"])[0] def applySharedStrategy(self, figures): sharedGE = self.imageUtils.compareImages(figures["G"], figures["H"])[0] for i in range(1, 9): if self.areEqual(sharedGE, figures[str(i)])[0]: return int(i) else: return -1 def chooseStrategy(self, figures): # everyone is the same figures_a_ = figures['A'] figures_b_ = figures['B'] figures_c_ = figures['C'] figures_d_ = figures['D'] figures_e_ = figures['E'] figures_f_ = figures['F'] figures_g_ = figures['G'] figures_h_ = figures['H'] # overlays rowAB = ImageChops.add(figures_a_, figures_b_) rowBC = ImageChops.add(figures_b_, figures_c_) rowDE = ImageChops.add(figures_d_, figures_e_) rowEF = ImageChops.add(figures_e_, figures_f_) colAD = ImageChops.multiply(figures_a_, figures_d_) colADG = ImageChops.multiply(colAD, figures_g_) colBE = ImageChops.multiply(figures_b_, figures_e_) colBEH = ImageChops.multiply(colBE, figures_h_) #common permutations ab = ImageChops.multiply(figures_a_, figures_b_) ac = ImageChops.multiply(figures_a_, figures_c_) df = ImageChops.multiply(figures_d_, figures_f_) abc = ImageChops.multiply(ab, figures_c_) de = ImageChops.multiply(figures_d_, figures_e_) de_F = ImageChops.multiply(de, figures_f_) #difs difAB = self.imageUtils.invertGrayScaleImage( ImageChops.difference(figures_a_, figures_b_)) difDE = self.imageUtils.invertGrayScaleImage( ImageChops.difference(figures_d_, figures_e_)) if self.areEqual(figures_a_, figures_b_)[0] and self.areEqual( figures_b_, figures_c_)[0]: if self.areEqual(figures_d_, figures_e_)[0] and self.areEqual( figures_e_, figures_f_)[0]: return 'row_equals' elif 
((self.areEqual(figures_a_, figures_d_)[0] or self.areEqual(figures_a_, figures_e_)[0] or self.areEqual(figures_a_,figures_f_)[0]) \ and (self.areEqual(figures_b_, figures_d_)[0] or self.areEqual(figures_b_, figures_e_)[0] or self.areEqual(figures_b_, figures_f_)[0]) \ and (self.areEqual(figures_c_, figures_d_)[0] or self.areEqual(figures_c_, figures_e_)[0] or self.areEqual(figures_c_, figures_f_)[0])): return 'one_of_each' elif self.areEqual(rowAB, rowBC)[0] and self.areEqual(rowDE, rowEF)[0]: return "one_cancels" elif self.areEqual(colADG, colBEH)[0]: return "cancel_out" elif self.areEqual(ab, figures_c_)[0] and self.areEqual( de, figures_f_)[0]: return "productAB" elif self.areEqual(ac, figures_b_)[0] and self.areEqual( df, figures_e_)[0]: return "productAC" elif self.areEqual(difAB, figures_c_)[0] and self.areEqual( difDE, figures_f_)[0]: return "diffAB" elif self.isShared(figures): return "shared" elif self.areEqual(abc, de_F)[0]: return "common_perms" def applyOnfOfEachStrategy(self, problem_figures): if self.areEqual(problem_figures['A'], problem_figures['G'])[0] or self.areEqual( problem_figures['A'], problem_figures['H'])[0]: if self.areEqual(problem_figures['B'], problem_figures['G'])[0] or self.areEqual( problem_figures['B'], problem_figures['H'][0]): if self.areEqual(problem_figures['C'], problem_figures['G'])[0] or self.areEqual( problem_figures['C'], problem_figures['H'])[0]: print "need to chose another strategy" else: missing_figure = 'C' else: missing_figure = 'B' else: missing_figure = "A" for i in range(1, 9): if self.areEqual(problem_figures[missing_figure], problem_figures[str(i)])[0]: return int(i) def applyOneCancelsStrategy(self, problem_figures): rowCF = ImageChops.add(problem_figures["C"], problem_figures["F"]) rowGH = ImageChops.add(problem_figures["G"], problem_figures["H"]) rowHF = ImageChops.multiply(problem_figures["H"], problem_figures["F"]) answers = {} for i in range(1, 9): candidate = ImageChops.add(rowCF, problem_figures[str(i)]) 
candidate2 = ImageChops.add(rowGH, problem_figures[str(i)]) if self.areEqual(rowCF, candidate)[0] and self.areEqual( rowGH, candidate2)[0]: answers[i] = problem_figures[str(i)] if len(answers) != 1: if self.isShared(problem_figures): return self.applySharedStrategy(problem_figures) else: return self.pick_the_one_not_seen(problem_figures) else: return answers.keys()[0] return -1 def applyCommonPermsStrategy(self, figures): de = ImageChops.multiply(figures["D"], figures["E"]) gh = ImageChops.multiply(figures["G"], figures["H"]) de_F = ImageChops.multiply(de, figures["F"]) for i in range(1, 9): candidate = ImageChops.multiply(gh, figures[str(i)]) if self.areEqual(candidate, de_F)[0]: return i return self.pick_the_one_not_seen(figures) def applyProductABStrategy(self, figures): gh = ImageChops.multiply(figures["G"], figures["H"]) for i in range(1, 9): if self.areEqual(gh, figures[str(i)])[0]: return i return self.pick_the_one_not_seen(figures) def applyProductACStrategy(self, figures): for i in range(1, 9): candidate = ImageChops.multiply(figures["G"], figures[str(i)]) if self.areEqual(candidate, figures["H"])[0]: return i return self.pick_the_one_not_seen(figures) def applyDiffABStrategy(self, figures): difGH = self.imageUtils.invertGrayScaleImage( ImageChops.difference(figures["H"], figures["G"])) for i in range(1, 9): if self.areEqual(figures[str(i)], difGH)[0]: return i return self.pick_the_one_not_seen(figures) def pick_the_one_not_seen(self, figures): figs = ["A", "B", "C", "D", "E", "F", "G", "H"] answers = [1, 2, 3, 4, 5, 6, 7, 8] for fig in figs: for i in range(1, 9): if self.areEqual(figures[fig], figures[str(i)])[0]: if i in answers: answers.remove(i) if len(answers) == 1: return answers[0] elif self.doNotGuess: return -1 return answers[0] def applyCancelOutStrategy(self, problem_figures): figures_a_ = problem_figures['A'] figures_c_ = problem_figures['C'] figures_d_ = problem_figures['D'] figures_f_ = problem_figures['F'] figures_g_ = problem_figures['G'] 
colAD = ImageChops.multiply(figures_a_, figures_d_) colADG = ImageChops.multiply(colAD, figures_g_) colCF = ImageChops.multiply(figures_c_, figures_f_) for i in range(1, 9): candidate = ImageChops.multiply(colCF, problem_figures[str(i)]) if self.areEqual(candidate, colADG)[0]: return int(i) return -1
print "THREAD: %s: failed to download %s to %s: %s\n%s" % ( url["path"], url["url"], saveas, str(e), str(format_exc()), ) result["error"] = "failed to download %s to %s: %s\n%s" % (url["url"], saveas, str(e), str(format_exc())) self.results.append(result) self.current_threads.pop() return # Save image info result["filesize"] = path.getsize(saveas) try: (result["width"], result["height"]) = ImageUtils.get_dimensions(saveas) except Exception, e: # This fails if we can't identify the image file. Consider it errored print "THREAD: %s: failed to identify image file %s from %s: %s\n%s" % ( url["path"], saveas, url["url"], str(e), format_exc(), ) result["error"] = "failed to identify image file %s from %s: %s\n%s" % ( saveas, url["url"], str(e), format_exc(), )
#!/usr/bin/python from os import listdir, path, walk from DB import DB from ImageUtils import ImageUtils db = DB() root = ImageUtils.get_root() ''' Iterates over existing sets, adds sets to database, attempts to populate DB with information based on filenames: * URL (http://i.imgur.com/<image> * Post ID * Comment ID * Creation time Copies existing set to new directory (/content/), Generates new thumbnails for the sets ''' def populate_db(): for user in listdir(path.join(root, 'users')): userdir = path.join(root, 'users', user) if not path.isdir(userdir): continue for item in listdir(userdir): itempath = path.join(userdir, item) if path.isfile(itempath): # Image #print "image: %s" % itempath db.add_existing_image(user, item, itempath)
print(" - Starting to import StyleTrans") import StyleTrans.StyleTrans print(" - Starting to import ImageUtils") import ImageUtils.ImageUtils print(" - Starting to import StyleContentModel") import StyleContentModel.StyleContentModel # Importing TensorFlow import tensorflow as tf plt.figure(figsize=(12, 6)) ImageUtils.enableflashplot() transferer = StyleTrans( content_path= 'https://gradschool.cornell.edu/wp-content/uploads/2018/07/JonPark.jpg', style_path= 'https://image.freepik.com/free-vector/abstract-dynamic-pattern-wallpaper-vector_53876-59131.jpg' ) # # Averaging two images # images = (transferer.content_image, transferer.style_image) # transferer.content_image = (images[0] + images[1])/2 # transferer.style_image = images[0] # transferer.updateImage()
def retrieve_result_from_url(self, url):
    """Download one media URL into rips/<path>/ and record the outcome.

    Builds a result dict, infers the file extension from the response
    content-type, downloads the file, and appends the result to
    self.results. Runs on a worker thread (see self.current_threads).
    """
    # url contains album_id, index, url, type, path, and saveas
    # TODO logging into dirname/log.txt
    # Construct base result
    result = {
        "album_id": url["album_id"],
        "i_index": url["i_index"],
        "url": url["url"],
        "valid": 0,  # filled in later on success
        "error": None,  # filled in on failure
        "type": url["type"],
        "image_name": url["saveas"],
        "filesize": 0,  # filled in after download
        "width": 0,  # filled in after download
        "height": 0,  # filled in after download
        "thumb_name": None,  # filled in by thumbnailing
        "t_width": 0,  # filled in by thumbnailing
        "t_height": 0,  # filled in by thumbnailing
        "metadata": url["metadata"],
        "path": url["path"],
    }
    # Get save directory
    dirname = path.join(ImageUtils.get_root(), "rips", url["path"])
    # Generate save path
    saveas = path.join(dirname, url["saveas"])
    if path.exists(saveas):
        print "THREAD: %s: removing existing file %s" % (url["path"], saveas)
        remove(saveas)
    meta = self.httpy.get_meta(url["url"])
    # NOTE(review): header-key casing is inconsistent below
    # ("Content-length", "Content-type" vs "Content-Type", "content-type").
    # The "Content-Type" lookup can KeyError when only "Content-type" is
    # present, and meta["content-type"] below is unguarded -- verify that
    # httpy normalises header names.
    # Imgur serves a 503-byte placeholder for removed images.
    if "imgur.com" in url["url"] and "Content-length" in meta and meta["Content-length"] == "503":
        print "THREAD: %s: imgur image was not found (503b) at %s" % (
            url["path"], url["url"])
        result["error"] = "imgur image was not found (503b) at %s" % url["url"]
        self.results.append(result)
        self.current_threads.pop()
        return
    # An HTML response means an error/landing page, not media.
    if "Content-type" in meta and "html" in meta["Content-Type"].lower():
        print "THREAD: %s: url returned HTML content-type at %s" % (
            url["path"], url["url"])
        result["error"] = "url returned HTML content-type at %s" % url["url"]
        self.results.append(result)
        self.current_threads.pop()
        return
    # Force the saved file's extension to match the content-type.
    if meta["content-type"].lower().endswith("png"):
        # image/png
        result["type"] = "image"
        if not saveas.lower().endswith("png"):
            saveas = saveas[: saveas.rfind(".") + 1] + "png"
    elif meta["content-type"].lower().endswith("jpeg") or meta["content-type"].lower().endswith("jpg"):
        # image/jpg
        result["type"] = "image"
        if not saveas.lower().endswith("jpg"):
            saveas = saveas[: saveas.rfind(".") + 1] + "jpg"
    elif meta["content-type"].lower().endswith("gif"):
        # image/gif
        result["type"] = "image"
        if not saveas.lower().endswith("gif"):
            saveas = saveas[: saveas.rfind(".") + 1] + "gif"
    elif meta["content-type"].lower().endswith("mp4"):
        # video/mp4
        result["type"] = "video"
        if not saveas.lower().endswith("mp4"):
            saveas = saveas[: saveas.rfind(".") + 1] + "mp4"
    elif meta["content-type"].lower().endswith("flv"):
        # video/flv
        result["type"] = "video"
        if not saveas.lower().endswith("flv"):
            saveas = saveas[: saveas.rfind(".") + 1] + "flv"
    elif meta["content-type"].lower().endswith("wmv"):
        # video/wmv
        result["type"] = "video"
        if not saveas.lower().endswith("wmv"):
            saveas = saveas[: saveas.rfind(".") + 1] + "wmv"
    result["image_name"] = path.basename(saveas)
    # Attempt to dowload image at URL
    try:
        self.httpy.download(url["url"], saveas)
    except Exception, e:
        print "THREAD: %s: failed to download %s to %s: %s\n%s" % (
            url["path"],
            url["url"],
            saveas,
            str(e),
            str(format_exc()),
        )
        result["error"] = "failed to download %s to %s: %s\n%s" % (
            url["url"], saveas, str(e), str(format_exc()))
        self.results.append(result)
        self.current_threads.pop()
        return
    # (method continues below this chunk)
if albumname != None: # Album! albumname = '%s-%s' % (base_fname, albumname) working_dir = path.join(working_dir, albumname) #self.debug('%s: process_url: adding album to database' % child.author) album_id = self.db.add_album( working_dir, child.author, url, postid, commid, ) else: album_id = None ImageUtils.create_subdirectories(path.join(working_dir, 'thumbs')) for media_index, media in enumerate(medias): # Construct save path: /user/post[-comment]-index-filename fname = ImageUtils.get_filename_from_url(media) fname = '%s-%02d-%s' % (base_fname, media_index, fname) saveas = path.join(working_dir, fname) # Download URL try: self.debug('%s: process_url: downloading #%d %s' % (child.author, media_index + 1, media)) ImageUtils.httpy.download(media, saveas) if path.getsize(saveas) == 503: raise Exception('503b = removed') except Exception, e:
# Anime Style Images Dataset (Absolute and relative paths) # https://github.com/Mckinsey666/Anime-Face-Dataset # if platform == "darwin": # # Specifics for Vadim # path_to_pics = "../../cropped/" # else: # Specifics for Victor # path_to_pics = "../Anime-Face-Dataset/cropped/" path_to_pics = "./anime-images/" # Grab content image from link # content_image = ImageUtils.grab_image('https://gradschool.cornell.edu/wp-content/uploads/2018/07/JonPark.jpg') # content_image = ImageUtils.grab_image('http://www.mathcs.richmond.edu/~jdenny/Jory.jpg') content_image = ImageUtils.grab_image( 'https://facultystaff.richmond.edu/~dszajda/images/doug_small_website_photo_UR_Fall_2011.jpg' ) # Uses model from Tensor Flow Hub print(" - Loading pre-trained model from hub") hub_model = hub.load( 'https://tfhub.dev/google/magenta/arbitrary-image-stylization-v1-256/1') print("\t> Finished") # Make list of style images and shuffle the list dirs = listdir(path_to_pics) shuffle(dirs) # Opens MatPlotLib figure fig = plt.figure(figsize=(12, 6)) fig = plt.gcf()
x1 = 0 y2 = 0 x2 = 0 y2 = 0 A = 0 B = 0 C = 0 # fileName = '/mnt/c/rrr/Рисунок (410).jpg' fileName = '/mnt/c/rrr/Рисунок (392).jpg' img = cv2.imread(fileName) # img = ImageUtils.resize_img(img, 2500.) img = ImageUtils.orientation_correction(img, False) ver_lines, hor_lines, table = TableDetection(img).run() height, width, depth = img.shape blank_image = ImageUtils.create_blank(width, height, (255, 255, 255)) img = img.copy() # for rows in table: # for row in rows: # x1, y1, w, h = row # img = cv2.rectangle(img, (int(x1), int(y1)), (w, h), (0, 255, 0), 5) # img = cv2.polylines(img, np.array([[25, 70], [25, 145]], np.int32), True, (0, 255, 0), 5) # cv2.imshow("blank_image", img) # cv2.waitKey()
def setUp(self): self.image1 = Image.open('test1.png') # test2.png Breite: 200px # Hoehe: 220px self.image2 = Image.open('test2.png') self.imageUtils = ImageUtils()
def __init__(self, led_num, border): self.NUMLED = led_num self.BORDER = border self.imageUtils = ImageUtils() self.currentColors = collections.OrderedDict() self.current_image_all = None
def add_existing_image(self, user, oldimage, oldpath, subdir='', album_id=-1):
    """Import an already-downloaded image into the content tree and the DB.

    Copies oldpath to content/<user>/<subdir>/, builds a thumbnail, records
    the image, and (for non-album images) creates legacy post/comment rows
    parsed from the old filename.
    """
    if 'tumblr' in oldpath:
        # Can't properly handle tumblr links
        self.debug('cannot properly handle tumblr links; trying anyway')
        #return
    if subdir == '' and album_id == -1:
        self.debug('adding image: %s' % oldpath)
    # Ensure image is an actual image
    try:
        dims = ImageUtils.get_dimensions(oldpath)
    except Exception:  # narrowed from a bare except
        self.debug('failed to load image: %s, skipping' % oldpath)
        return
    newimage = path.join(ImageUtils.get_root(), 'content', user,
                         subdir, oldimage)
    newimage = newimage.replace('.jpeg.jpg', '.jpg')
    thumbnail = path.join(ImageUtils.get_root(), 'content', user,
                          subdir, 'thumbs', oldimage)
    thumbnail = thumbnail.replace('.jpeg.jpg', '.jpg')
    if path.exists(newimage):
        self.debug('new image already exists: %s' % newimage)
        return
    ImageUtils.create_subdirectories(path.join(ImageUtils.get_root(),
                                               'content', user, subdir,
                                               'thumbs'))
    copy2(oldpath, newimage)
    (post, comment, imgid) = self.get_post_comment_id(oldimage)
    url = 'http://i.imgur.com/%s' % imgid
    dims = ImageUtils.get_dimensions(newimage)
    size = path.getsize(newimage)
    # Build the thumbnail exactly once. The original called
    # create_thumbnail twice back to back; after a first failure the second
    # call would even write into the shared nothumb.png fallback path.
    try:
        ImageUtils.create_thumbnail(newimage, thumbnail)
    except Exception as e:
        self.debug('add_existing_image: create_thumbnail failed: %s' % str(e))
        thumbnail = path.join(ImageUtils.get_root(), 'images', 'nothumb.png')
    try:
        self.add_image(newimage, user, url, dims[0], dims[1], size,
                       thumbnail, 'image', album_id, post, comment)
    except Exception as e:
        self.debug('add_existing_image: failed: %s' % str(e))
        return
    if subdir == '' and album_id == -1:
        # Not an album
        # Add post
        p = Post()
        p.id = post
        p.author = user
        if comment is None:
            # Only a bare post links directly to the image URL.
            p.url = url
        p.created = path.getctime(oldpath)
        p.subreddit = ''
        p.title = ''
        try:
            self.add_post(p, legacy=1)
        except Exception as e:
            self.debug('add_existing_image: create post failed: %s' % str(e))
        # Add comment (removed a redundant nested `if comment != None`).
        if comment is not None:
            c = Comment()
            c.id = comment
            c.post_id = post
            c.author = user
            c.body = url
            # NOTE(review): the original sets p.created here rather than
            # c.created -- looks like a typo; preserved pending confirmation.
            p.created = path.getctime(oldpath)
            try:
                self.add_comment(c, legacy=1)
            except Exception as e:
                self.debug('add_existing_image: create comment failed: %s'
                           % str(e))
'\n\t' + 'site text primary key, \n\t' + 'username text, \n\t' + 'password text \n\t', 'config' : '\n\t' + 'key text primary key, \n\t' + 'value text \n\t', 'friends' : '\n\t' + 'username text primary key\n\t', } DB_FILE = path.join(ImageUtils.get_root(), 'database.db') class DB: def __init__(self): self.logger = stderr if path.exists(DB_FILE): self.debug('__init__: using database file: %s' % DB_FILE) else: self.debug('__init__: database file (%s) not found, creating...' % DB_FILE) self.conn = None self.conn = sqlite3.connect(DB_FILE) #TODO CHANGE BACK, encoding='utf-8') self.conn.text_factory = lambda x: unicode(x, "utf-8", "ignore") # Don't create tables if not supplied. if SCHEMA != None and SCHEMA != {} and len(SCHEMA) > 0: # Create table for every schema given. for key in SCHEMA:
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Tue Jun 11 11:59:23 2019 @author: ayeshabhatnagar """ from ImageUtils import ImageUtils import pandas as pd import numpy as np import math import matplotlib.pyplot as plt from matplotlib.pyplot import imread img_utils = ImageUtils() #TODO: Implement Singleton deriv_ = np.array([1, -2, 1]) def derivative_2nd(deriv, axis=1, in_file="images/cameraman.tif", out_file="x_deriv_cameraman.tiff"): """second derivative wrt x """ arr = img_utils.read_image(in_file) rows = arr.shape[0] cols = arr.shape[1] new_img = np.ones((rows, cols)) if axis == 0: print("HAHA") for m in range(rows):
def main(self): url='http://i.reddituploads.com/565bfd920d114339930a3e2407a5d967' ImageUtils.get_urls(url)