def post():
    """Fetch an image tuple and publish it to Twitter with caption and source."""
    # Retry until the fetcher yields a usable (path, source, text) tuple.
    while True:
        images = get_images.main()
        if images is not None:
            break
    impath, source, text = images
    # Upload the media file first, then reference it from the status update.
    with open(impath, "rb") as img:
        response = twitter.upload_media(media=img)
    caption = f"{text} ({source})"
    twitter.update_status(status=caption, media_ids=[response["media_id"]])
    get_images.logging.info("Posted to twitter.")
def download(self, event):
    """Event handler: download images from the URL field into the chosen directory."""
    # Read both widget values and hand them straight to the downloader.
    get_images.main(self.url.GetValue(), self.dir.GetValue())
# Refresh the repository, then regenerate derived content (gists and images).
from pages import git_pull_origin
import get_gists_from_user as get_gists
import get_images

# Pull the latest changes before rebuilding, so generated output is current.
git_pull_origin.main()
get_gists.main()
get_images.main()
def main(mode, n_classes, im_size, prj_dir, feat_dir, classes, batch_size=50):
    """Train or evaluate an image classifier on directory-organized data.

    Args:
        mode: Either "train" (fit a new model, then predict on the test set)
            or "test" (load the most recent checkpoint and predict).
        n_classes: Number of output classes.
        im_size: (height, width) target size for the image generators.
        prj_dir: Project root; logs and model checkpoints live under it.
        feat_dir: Root directory containing the train/valid/test image folders.
        classes: Class subdirectory names passed to flow_from_directory.
        batch_size: Generator batch size (default 50).

    Returns:
        Tuple of (pred, true_test, test_names): predicted class probabilities,
        true test labels, and the matching test filenames.

    Raises:
        ValueError: If `mode` is neither "train" nor "test".
    """
    print('Getting data...')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        featurewise_center=True, featurewise_std_normalization=True)
    valid_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        featurewise_center=True, featurewise_std_normalization=True)
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        featurewise_center=True, featurewise_std_normalization=True)

    # Fit all three generators on the training images so the normalization
    # statistics are shared; drop the array afterwards to cap peak memory.
    images = get_images.main(feat_dir + r'\\train')
    train_datagen.fit(images)
    valid_datagen.fit(images)
    test_datagen.fit(images)
    del images

    train_batches = train_datagen.flow_from_directory(
        feat_dir + r'\\train', target_size=im_size,
        batch_size=batch_size, classes=classes)
    valid_batches = valid_datagen.flow_from_directory(
        feat_dir + r'\\valid', target_size=im_size,
        batch_size=batch_size, classes=classes)
    # shuffle=False keeps predictions aligned with .classes / .filenames.
    test_batches = test_datagen.flow_from_directory(
        feat_dir + r'\\test', target_size=im_size,
        batch_size=batch_size, classes=classes, shuffle=False)
    true_test = test_batches.classes
    test_names = test_batches.filenames

    # create log folder if not exists:
    if not os.path.exists(prj_dir + r'\\logs\\'):
        os.makedirs(prj_dir + r'\\logs\\')
    # create model folder if not exists:
    if not os.path.exists(prj_dir + r'\\model\\'):
        os.makedirs(prj_dir + r'\\model\\')
    log_dir = prj_dir + r"\\logs\\" + datetime.datetime.now().strftime(
        "%Y%m%d-%H%M%S")
    checkpoint_dir = prj_dir + r".\\model\\model.{epoch:02d}-{val_loss:.2f}.h5"
    input_shape = (im_size[0], im_size[1], 3)

    # BUG FIX: the original used `mode is "train"` / `mode is 'test'`, which
    # compares string *identity*, not equality — unreliable and a
    # SyntaxWarning on modern CPython. Use `==` and an explicit else branch.
    if mode == "train":
        print('Start training...')
        model = train(log_dir, checkpoint_dir, n_classes, train_batches,
                      valid_batches, input_shape)
        print('Done training!')
        print('Start Testing...')
        pred = model.predict(x=test_batches, verbose=0)
        print('Done testing!')
    elif mode == 'test':
        # get saved model: pick the most recently created checkpoint file.
        print('Import model...')
        list_of_files = glob.glob(
            prj_dir + r'\model\*')  # * means all if need specific format then *.csv
        latest = max(list_of_files, key=os.path.getctime)
        # Create a new model instance
        model = create_model(n_classes, input_shape)
        # Load the previously saved weights
        model.load_weights(latest)
        print('Start Testing...')
        pred = model.predict(x=test_batches, verbose=0)
        print('Done testing!')
    else:
        # Previously an unrecognized mode fell through to a NameError on
        # `pred` at the return statement; fail fast with a clear message.
        raise ValueError(f"mode must be 'train' or 'test', got {mode!r}")
    return pred, true_test, test_names
def get_images_(args):
    """CLI entry point: fetch images belonging to the given owner id."""
    from get_images import main as fetch_images

    fetch_images(args.owner_id)