def main(argv):
    all_images = False
    match = []
    try:
        opts, args = getopt.getopt(argv[1:], "ham:",
                                   ["help", "all", "match="])
    except getopt.GetoptError:
        usage()
        sys.exit(2)
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            usage()
            sys.exit()
        elif opt == "--all":
            all_images = True
        elif opt in ("-m", "--match"):
            match.append(arg)
    if all_images:
        for i in get_images():
            build(i)
        sys.exit(0)
    for m in match:
        for i in get_images():
            if m in i:
                build(i)
    for a in args:
        if a not in get_images():
            print("Cannot find %s in available images" % a)
        else:
            build(a)
import os
import tempfile

import images


def test_get_images(credentials):
    filenames_to_write_imgs = {
        # mktemp() only reserves a path; the file does not exist yet.
        'water': tempfile.mktemp(),
    }
    assert not os.path.exists(filenames_to_write_imgs['water'])
    images.get_images(filenames_to_write_imgs, credentials)
    if not os.path.exists(filenames_to_write_imgs['water']):
        raise ValueError("Image file for `water` wasn't written.")
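# A possible pytest fixture supplying the `credentials` argument to the test
# above. This is a sketch: the real fixture and the environment variable name
# ('IMAGE_API_CREDENTIALS') are assumptions, not taken from the source.
import os
import pytest


@pytest.fixture
def credentials():
    # Read credentials from the environment so the test can run in CI.
    return os.environ.get('IMAGE_API_CREDENTIALS')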
def head_pose(
    drv: driver.ServingDriver,
    frame: np.ndarray,
    bboxes: np.ndarray,
    rgb=True,
):
    img_size = list(drv.inputs.values())[0][2]
    imgs = np.stack(
        images.get_images(
            frame,
            np.array(bboxes).astype(int),
            img_size,
            face_crop_margin=0,
            normalization=None,
            face_crop_margin_coef=MARGIN_COEF))
    if rgb:
        # Convert to BGR.
        imgs = imgs[:, :, :, ::-1]
    input_name = list(drv.inputs.keys())[0]
    outputs = drv.predict({input_name: np.array(imgs).transpose([0, 3, 1, 2])})
    yaw = -outputs["angle_y_fc"].reshape([-1])
    pitch = -outputs["angle_p_fc"].reshape([-1])
    roll = outputs["angle_r_fc"].reshape([-1])
    # Return shape [N, 3] as a result.
    return np.array([yaw, pitch, roll]).transpose()
def age_gender(
    drv: driver.ServingDriver,
    frame: np.ndarray,
    bboxes: np.ndarray,
    rgb=True,
):
    img_size = list(drv.inputs.values())[0][2]
    imgs = np.stack(
        images.get_images(
            frame,
            np.array(bboxes).astype(int),
            img_size,
            face_crop_margin=0,
            normalization=None,
            face_crop_margin_coef=MARGIN_COEF))
    if rgb:
        # Convert to BGR.
        imgs = imgs[:, :, :, ::-1]
    input_name = list(drv.inputs.keys())[0]
    outputs = drv.predict({input_name: np.array(imgs).transpose([0, 3, 1, 2])})
    age = (outputs["age_conv3"].reshape([-1]) * 100).round().astype(int)
    # gender: 0 - female, 1 - male
    gender = outputs['prob'].reshape([-1, 2]).argmax(1)
    return age, gender
def __init__(self, image_name, format_name, *groups):
    """Platform constructor.

    Sets the platform's image and adds the Sprite to the needed
    sprite groups.
    """
    super().__init__(*groups)
    # Set the image.
    self.image = get_images(image_name, format_name)[0]
    self.rect = self.image.get_rect()
def main():
    """Main entry point."""
    parser = make_parser()
    args = parser.parse_args()

    if args.type == 'all':
        if scrapekit.confirm('scrape ALL categories'):
            scraping_urls = URLS.values()
    else:
        # Scrape one category.
        scraping_urls = [URLS[args.type]]

    rows = get_rows(scraping_urls)
    original_count = len(rows)
    rows = process_rows(rows=rows, args=args)

    # Filework
    if args.images:
        img_dir = DATA_DIR + 'images_' + args.type + '/'
        images.get_images(rows, img_dir)
        # Create an image sheet.
        images.mk_img_sheet(img_dir)
        exit()

    # Info and summary section
    if args.verbose:
        for r in rows:
            pprint(r)
    if not args.quiet:
        sep = '-' * 60
        print('')
        print(sep.center(80))
        print('SUMMARY'.center(80))
        print('')
        print('Type selected: {}'.format(args.type))
        print('Total rows scraped: {}'.format(original_count))
        print('Total rows kept: {}'.format(len(rows)))
        # print('Total unique and known names: {}'.format(len(unique_names)))

    if args.format:
        write_file(rows, args)
def main(argv):
    images_path = os.path.join(os.path.expanduser("~"),
                               '.local/share/contingious/storage/images')
    all_images = False
    match = []
    if len(argv) == 1:
        usage()
        sys.exit(0)
    try:
        opts, args = getopt.getopt(argv[1:], "hd:am:",
                                   ["help", "images-path=", "all", "match="])
    except getopt.GetoptError:
        usage()
        sys.exit(2)
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            usage()
            sys.exit()
        elif opt == "--images-path":
            images_path = arg
        elif opt == "--all":
            all_images = True
        elif opt in ("-m", "--match"):
            match.append(arg)
    if all_images:
        for i in get_images():
            build(i, images_path)
        sys.exit(0)
    for m in match:
        for i in get_images():
            if m in i:
                build(i, images_path)
    for a in args:
        if a not in get_images():
            print("Cannot find %s in available images" % a)
        else:
            build(a, images_path)
def main():
    parse = argparse.ArgumentParser(
        description='Spotify image gatherer and creator of collages')
    # parse.add_argument('-p', '--playlist', dest='playlist', type=str,
    #                    help='Get all album art from a specific playlist')
    # parse.add_argument('-a', '--artist', dest='artist', type=str,
    #                    help='Get album art from an artist, by default it grabs first 10')
    parse.add_argument('url', nargs='?')
    parse.add_argument(
        '-c', '--collage',
        action='count',
        default=0,
        help='Create a collage out of images gathered from "playlist" or '
             '"artist" argument.')
    parse.add_argument('-d', '--directory',
                       dest='directory',
                       type=str,
                       help='Specify a target directory to output results')
    parse.add_argument(
        '-v', '--verbose',
        action='count',
        default=0,
        help='See the program working instead of just believing that it is '
             'working')
    parse.add_argument('-z', '--zip',
                       action='count',
                       default=0,
                       help='Output the directory into a zip file')
    args = parse.parse_args()

    if args.url is None:
        print('Spotify URL is required.')
        exit(1)

    c = args.collage
    d = args.directory
    v = args.verbose
    z = args.zip
    if not c and d:
        parse.print_help()
        print("\nCannot specify a target directory without the -c flag")
        exit(1)

    directory = images.get_images(args.url,
                                  directory=args.directory,
                                  verbose=args.verbose,
                                  zip_this=args.zip)
    if c:
        collage.make_collage(directory=directory, verbose=args.verbose)
def main(argv):
    directory = "/".join(argv[0].split("/")[0:-1])
    all_images = False
    match = []
    if len(argv) == 1:
        usage()
        sys.exit(0)
    try:
        opts, args = getopt.getopt(argv[1:], "hd:am:",
                                   ["help", "directory=", "all", "match="])
    except getopt.GetoptError:
        usage()
        sys.exit(2)
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            usage()
            sys.exit()
        elif opt in ("-d", "--directory"):
            directory = arg
        elif opt == "--all":
            all_images = True
        elif opt in ("-m", "--match"):
            match.append(arg)
    if all_images:
        for i in get_images():
            build(i, directory)
        sys.exit(0)
    for m in match:
        for i in get_images():
            if m in i:
                build(i, directory)
    for a in args:
        if a not in get_images():
            print("Cannot find %s in available images" % a)
        else:
            build(a, directory)
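# Example invocations of the getopt-based mains above, assuming a build.py
# module that defines usage(), get_images(), and build():
#
#   python3 build.py --all                # build every known image
#   python3 build.py -m alpine            # build images whose name contains "alpine"
#   python3 build.py -d ./scripts img1    # build img1 using scripts in ./scripts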
def get_images_api(start_index):
    try:
        if start_index:
            start_index = int(start_index)
        else:
            start_index = 0
        images = get_images(start_index=start_index)
        return jsonify(data=images)
    except Exception:
        logging.exception("error")
        raise
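# A hedged sketch of how get_images_api above might be exposed as a Flask
# route (jsonify implies Flask). The app object, route path, and the
# 'start_index' query-parameter name are assumptions.
from flask import Flask, request

app = Flask(__name__)


@app.route('/api/images')
def images_endpoint():
    # Pass the raw query parameter through; get_images_api handles the
    # missing/empty case by defaulting to 0.
    return get_images_api(request.args.get('start_index'))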
def get_recommendations(self, emotion, choice):
    """Retrieves recommendations for the user based on their emotion.

    Args:
        emotion: the user's emotion/mood
        choice: whether the user wants calming (0) or relatable (1)
            recommendations
    """
    print()
    print('Getting recommendations...')
    # Map the binary choice onto the mood keys used by the mapping tables.
    mood = 'relatable' if choice == 1 else 'soothe'
    rec_dict = {}
    rec_dict[self.const.quote_key] = get_the_quote(
        self.const.quotes_emotions_mapping[emotion])
    rec_dict[self.const.song_key] = get_songs(
        self.const.music_emotion_mapping[mood][emotion])
    rec_dict[self.const.movie_key] = get_movies(
        self.const.movie_emotion_mapping[mood][emotion])
    rec_dict[self.const.image_key] = get_images(
        self.const.image_emotion_mapping[mood][emotion])
    rec_dict[self.const.book_key] = get_books(
        self.const.book_emotion_mapping[mood][emotion])
    print(rec_dict[self.const.song_key])
    print(rec_dict[self.const.book_key])
    print(rec_dict[self.const.movie_key])
    print(rec_dict[self.const.image_key])
    print(rec_dict[self.const.quote_key])
    with open(self.const.recs_path, 'wb') as handle:
        pickle.dump(rec_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
def usage(): usage_msg = "Usage: python3 build.py [OPTIONS] IMAGE1 [IMAGE2 ..]\n" \ "\n" \ "Pull podman images\n\n" \ "Options:\n" \ " -h, --help Display this message\n" \ " -a, --all Pull all the images\n" \ " -m, --match string Images that contains the string" \ "\n" \ "Images:\n %s" % "\n ".join(get_images()) print(usage_msg)
def usage(): usage_msg = "Usage: python3 import.py [OPTIONS] IMAGE1 [IMAGE2 ..]\n" \ "\n" \ "Build the lxc images\n\n" \ "Options:\n" \ " -h, --help Display this message\n" \ " --images-path path Base directory where to save images\n" \ " -a, --all Build all the images\n" \ " -m, --match string Images that contains the string" \ "\n" \ "Images:\n %s" % "\n ".join(get_images()) print(usage_msg)
def usage(): usage_msg = "Usage: python3 build.py [OPTIONS] IMAGE1 [IMAGE2 ..]\n" \ "\n" \ "Build the lxc images\n\n" \ "Options:\n" \ " -h, --help Display this message\n" \ " -d, --directory path Base directory for the building scripts (default: ./)\n" \ " -a, --all Build all the images\n" \ " -m, --match string Images that contains the string" \ "\n" \ "Images:\n %s" % "\n ".join(get_images()) print(usage_msg)
def load_python(artist):
    name, bio = load_wiki(artist)
    # Keep only the first sentence of the bio.
    bio = bio.split('.', maxsplit=1)[0]
    url = get_images(artist, 1)
    pic_url = ''.join(url)
    artist_dict = {
        'name': name,
        'bio': bio,
        'pics': pic_url,
        'social_acc': 'social',
    }
    artist_record.insert_one(artist_dict)
    print('successful')
def usage(): usage_msg = "Usage: python3 build.py [OPTIONS] IMAGE1 [IMAGE2 ..]\n" \ "\n" \ "Import container images from tarball export\n\n" \ "Options:\n" \ " -h, --help Display this message\n" \ " -d, --directory path Base directory in which the images a re stored (default: ./)\n" \ " -a, --all Build all the images\n" \ " -m, --match string Images that contains the string" \ "\n" \ "Images:\n %s" % "\n ".join(get_images()) print(usage_msg)
def main():
    parse = argparse.ArgumentParser(
        description='Spotify image gatherer and creator of collages')
    parse.add_argument('url', nargs='?')
    parse.add_argument(
        '-c', '--collage',
        action='count',
        default=0,
        help='Create a collage out of images gathered from "playlist" or '
             '"artist" argument.')
    parse.add_argument('-d', '--directory',
                       dest='directory',
                       type=str,
                       help='Specify a target directory to output results')
    parse.add_argument(
        '-v', '--verbose',
        action='count',
        default=0,
        help='See the program working instead of just believing that it is '
             'working')
    parse.add_argument('-z', '--zip',
                       action='count',
                       default=0,
                       help='Output the directory into a zip file')
    args = parse.parse_args()

    if args.url is None:
        print('Spotify URL is required.')
        exit(1)

    c = args.collage
    d = args.directory
    v = args.verbose
    z = args.zip

    directory = images.get_images(args.url,
                                  directory=args.directory,
                                  verbose=args.verbose,
                                  zip_this=args.zip)
    if c:
        collage.make_collage(directory=directory, verbose=args.verbose)
def __init__(self, player, *groups):
    super().__init__(*groups)
    self.clock = pygame.time.Clock()
    self.time = 0
    self.groups = groups
    self.facing = player.player_facing
    self.r_animation = get_images(f'bullet{os.sep}bullet_0001...0008')
    self.l_animation = [
        pygame.transform.flip(x, True, False) for x in self.r_animation
    ]
    self.current_animation = (self.r_animation
                              if self.facing == 'r' else self.l_animation)
    self.image = self.current_animation[0]
    self.rect = self.image.get_rect()
    self.rect.y = randint(player.rect.y, player.rect.y + 100)
    self.rect.x = (player.rect.x + 75
                   if self.facing == 'r' else player.rect.left - 75)
    self.rect.width = 150
    self.rect.height = 50
def landmarks(
    drv: driver.ServingDriver,
    frame: np.ndarray,
    bboxes: np.ndarray,
    rgb=True,
):
    img_size = list(drv.inputs.values())[0][2]
    imgs = np.stack(
        images.get_images(
            frame,
            np.array(bboxes).astype(int),
            img_size,
            face_crop_margin=0,
            normalization=None,
        ))
    if rgb:
        # Convert to BGR.
        imgs = imgs[:, :, :, ::-1]
    input_name = list(drv.inputs.keys())[0]
    outputs = drv.predict({input_name: np.array(imgs).transpose([0, 3, 1, 2])})
    landmarks = outputs["align_fc3"]
    landmarks = landmarks.reshape(landmarks.shape[0], -1, 2)
    sizes = np.array((bboxes[:, 2] - bboxes[:, 0],
                      bboxes[:, 3] - bboxes[:, 1])).transpose()
    landmarks = landmarks * sizes.reshape((len(landmarks), 1, 2))
    offset = bboxes[:, :2]
    # landmarks = landmarks + offset.transpose().reshape((len(landmarks), 1, 2))
    landmarks = landmarks + np.resize(offset,
                                      (offset.shape[0], 1, offset.shape[1]))
    return landmarks
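# A minimal sketch of the face-cropping `images.get_images` helper that
# head_pose, age_gender, and landmarks above depend on. Only the signature is
# taken from the calls in this file; the margin and normalization handling
# here is an assumption, not the project's actual implementation.
import cv2
import numpy as np


def get_images_sketch(frame, bboxes, img_size, face_crop_margin=0,
                      normalization=None, face_crop_margin_coef=0.0):
    """Crop each bounding box out of `frame` and resize to a square."""
    h, w = frame.shape[:2]
    crops = []
    for x1, y1, x2, y2 in bboxes:
        # Widen the box by a fixed margin plus a size-proportional margin.
        mx = face_crop_margin + int((x2 - x1) * face_crop_margin_coef)
        my = face_crop_margin + int((y2 - y1) * face_crop_margin_coef)
        x1, y1 = max(0, x1 - mx), max(0, y1 - my)
        x2, y2 = min(w, x2 + mx), min(h, y2 + my)
        crop = cv2.resize(frame[y1:y2, x1:x2], (img_size, img_size))
        if normalization == 'prewhiten':
            # Zero-mean, unit-variance normalization.
            crop = (crop - crop.mean()) / max(crop.std(), 1e-6)
        crops.append(crop)
    return crops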
def main(argv=None):
    del argv

    # Parse input CSV file. Infer the expected format by peeking at the number
    # of elements in the first line.
    if _num_elements_in_first_row(FLAGS.input_file) == 1:
        single_words = parse_single_word_csv(FLAGS.input_file)
        english_words, translated_words = translate_lib.get_translations(
            single_words, credentials)
        logging.info('Translated %i words.' % len(translated_words))
        translated_words_no_diacritics = translate_lib.strip_diacritics(
            translated_words)
        _check_unique(translated_words_no_diacritics)
        word_translation_pairs = WordTranslationPairs(
            zip(english_words, translated_words_no_diacritics,
                translated_words))
    else:
        word_translation_pairs = parse_word_translation_csv(FLAGS.input_file)
    assert isinstance(word_translation_pairs, WordTranslationPairs)

    if not _all_unique(word_translation_pairs.english_words):
        raise ValueError('Not all words are unique.')
    if not _all_unique(word_translation_pairs.translations):
        raise ValueError('Not all translations are unique.')

    # Determine the full filename where media should be written to.
    # If the media is already fetched, we still want to write it to the CSV,
    # but we don't want to fetch it.
    filenames_to_write_imgs = {
        word: os.path.join(FLAGS.output_dir,
                           IMAGE_FILENAME_FORMAT.format(english=word))
        for word in word_translation_pairs.english_words
    }
    filenames_to_write_auds = {
        word: os.path.join(FLAGS.output_dir,
                           AUDIO_FILENAME_FORMAT.format(translation=word))
        for word in word_translation_pairs.translations
    }

    # The media we write might be different than the media that we fetch, if
    # some media already exists. Make a copy of the media list so we can track
    # media to fetch, and remove entries corresponding to media that already
    # exists.
    if FLAGS.disable_image_fetching:
        filenames_to_fetch_imgs = {}
    else:
        filenames_to_fetch_imgs = {
            k: v for k, v in filenames_to_write_imgs.items()
        }
    filenames_to_fetch_auds = {
        k: v for k, v in filenames_to_write_auds.items()
    }
    # NOTE: This function modifies the first argument.
    if not FLAGS.override_images:
        remove_existing_filenames(filenames_to_fetch_imgs, FLAGS.output_dir)
    remove_existing_filenames(filenames_to_fetch_auds, FLAGS.output_dir)

    # Get images and audio.
    # TODO(joelshor): Try to combine approaches and fetch media if it doesn't
    # exist.
    if FLAGS.already_downloaded_media_dir:
        images_lib.copy_images_from_disk(filenames_to_fetch_imgs,
                                         FLAGS.already_downloaded_media_dir)
        audio_lib.copy_audio_from_disk(filenames_to_fetch_auds,
                                       FLAGS.already_downloaded_media_dir)
    else:
        words_without_imgs = images_lib.get_images(filenames_to_fetch_imgs,
                                                   credentials)
        words_without_audio = audio_lib.get_audio(filenames_to_fetch_auds,
                                                  credentials)
        # Remove words without audio or image from the flashcard list *to
        # write to csv*.
        for english_word in words_without_imgs:
            translated_word = word_translation_pairs.get_translation(
                english_word)
            logging.warning('Couldn\'t find image for: %s / %s', english_word,
                            translated_word)
        for translated_word in words_without_audio:
            english_word = word_translation_pairs.get_english(translated_word)
            logging.warning('Couldn\'t find audio for: %s / %s', english_word,
                            translated_word)
        english_words_to_remove = set(words_without_imgs).union(
            set([
                word_translation_pairs.get_english(x)
                for x in words_without_audio
            ]))
        _remove_words(english_words_to_remove, filenames_to_write_auds,
                      filenames_to_write_imgs, word_translation_pairs)

    # Sanity check that all files now exist.
    if not FLAGS.disable_image_fetching:
        _files_exist(filenames_to_write_imgs.values())
    _files_exist(filenames_to_write_auds.values())
    logging.info('Wrote media files to: %s', FLAGS.output_dir)

    # Write a CSV that can be imported into an Anki deck.
    csv_rows = anki_import_csv.make_csv_format(
        word_translation_pairs.translation_dict,
        filenames_to_write_imgs,
        filenames_to_write_auds,
        extra_info=word_translation_pairs.extra_info)
    _write_csv_rows(csv_rows, FLAGS.output_csv_file)
    logging.warning('Wrote Anki import csv to: %s', FLAGS.output_csv_file)
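# Hypothetical values for the filename format constants used above, inferred
# only from the .format(english=...) and .format(translation=...) calls; the
# actual formats are defined elsewhere in the project.
IMAGE_FILENAME_FORMAT = '{english}.jpg'
AUDIO_FILENAME_FORMAT = '{translation}.mp3'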
def task_get_images(keyword):
    return images.get_images(keyword)
def __init__(self, *groups):
    super().__init__(*groups)
    # Direction that the player is facing.
    self.player_facing = 'r'
    # All animations for the player, including directions.
    self.idle_r = get_images(f'Idle{os.sep}cuphead_idle_0001...0005')
    self.idle_l = [
        pygame.transform.flip(img, True, False) for img in self.idle_r
    ]
    self.run_normal_r = get_images(
        f'Run{os.sep}Normal{os.sep}cuphead_run_0001...00016')
    self.run_normal_l = [
        pygame.transform.flip(img, True, False) for img in self.run_normal_r
    ]
    self.run_shoot_r = get_images(
        f'Run{os.sep}Shooting{os.sep}cuphead_run_shoot_0001...00016')
    self.run_shoot_l = [
        pygame.transform.flip(img, True, False) for img in self.run_shoot_r
    ]
    self.shoot_r = get_images(
        f'Shoot{os.sep}cuphead_shoot_straight_0001...0003')
    self.shoot_l = [
        pygame.transform.flip(img, True, False) for img in self.shoot_r
    ]
    self.jump_r = get_images(
        f'Jump{os.sep}Cuphead{os.sep}cuphead_jump_0001...0008')
    self.jump_l = [
        pygame.transform.flip(img, True, False) for img in self.jump_r
    ]
    self.jump_sound = pygame.mixer.Sound('sfx/sfx_player_jump_01.wav')
    self.jump_sound.set_volume(0.1)
    self.shoot_sound = pygame.mixer.Sound(
        'sfx/sfx_player_default_fire_loop_01.wav')
    self.shoot_sound.set_volume(0.2)
    self.shoot_sound_playing = False
    # Current list of images.
    self.current_animation = self.idle_r
    self.image = self.current_animation[0]
    self.rect = self.image.get_rect()
    self.running = True
    # Speed of the player.
    self.speed_x = 0
    self.speed_y = 0
    self.level = None
    self.lives = 3
    # Used to manage interacting with the platforms.
    self.in_air = False
    self.in_fall = True
    self.in_air_secs = 0
    self.clock = pygame.time.Clock()
    self.time = 0
    self.bullets = pygame.sprite.Group()
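# A minimal sketch of the `get_images` animation loader that the sprite
# classes above assume, covering the "prefix_0001...0008" range syntax. The
# base asset directory and file extension are assumptions; the real helper's
# signature also varies between snippets (some pass a format argument).
import os
import pygame


def get_images_sketch(pattern, fmt='png', base_dir='images'):
    """Load a list of frames described by 'path/prefix_0001...0008'."""
    if '...' not in pattern:
        # Single image: return a one-element list.
        path = os.path.join(base_dir, '%s.%s' % (pattern, fmt))
        return [pygame.image.load(path).convert_alpha()]
    head, last = pattern.rsplit('...', 1)
    prefix, first = head.rsplit('_', 1)
    frames = []
    for n in range(int(first), int(last) + 1):
        # Keep the zero-padding of the first frame number; convert_alpha()
        # assumes the display mode has already been set.
        path = os.path.join(base_dir,
                            '%s_%0*d.%s' % (prefix, len(first), n, fmt))
        frames.append(pygame.image.load(path).convert_alpha())
    return frames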
            a = hist_back[tmp[0]][tmp[1]][tmp[2]]
            b = hist_fore[tmp[0]][tmp[1]][tmp[2]]
            if b <= a:
                test[i][col] = np.array([0, 128, 128])
        test = cv2.cvtColor(test, cv2.COLOR_LAB2BGR)
        fore.append(test)
    return fore


if __name__ == '__main__':
    dataset = 'A1train'
    trainset = 'A1train'
    labelset = 'A1'
    image_names, label_names = images.get_files_name(dataset, labelset, 'rgb')
    # Note: this rebinds the `images` module name to the loaded image list.
    images, labels = images.get_images(image_names, label_names)
    # hist_fore, hist_back = get_3d_histogram(images, labels)
    hist_fore = np.load('hist_fore_' + trainset + '.npy')
    hist_back = np.load('hist_back_' + trainset + '.npy')
    fores = threshold_fore(images, hist_fore, hist_back)
    # for fore, name in zip(fores, image_names):
    #     cv2.imwrite(name.replace('rgb', 'filted'), fore)
    '''
    test = cv2.cvtColor(test, cv2.COLOR_LAB2BGR)
    imgs = np.hstack([init, test, mask])
    cv2.imshow('', imgs)
    cv2.waitKey(0)
    '''
def ShowArtistPicture(parameters):
    artist = parameters.get('music-artist')
    urls = get_images(artist, 3)
    return urls
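# Example call, assuming a webhook-style parameters dict and that get_images
# returns a list of image URLs:
#   ShowArtistPicture({'music-artist': 'Miles Davis'})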