def scrape_tartans(args):
    """
    Slowly, serially download images so as not to wear out our welcome.
    """
    maybe_makedirs(args.output_path, exist_ok=True)
    print('Scraping tartans')

    # prepare list of ids to scrape, possibly resume work
    ids_to_scrape = load_state(args.state)
    if ids_to_scrape is None:
        ids_to_scrape = list(range(1, args.max_id))
        random.shuffle(ids_to_scrape)
        errors = []
    else:
        errors = load_state(args.errors) or []

    num_processed = 0
    while ids_to_scrape:
        page_id = ids_to_scrape.pop()
        url = args.url_template.format(page_id=page_id,
                                       width=args.size,
                                       height=args.size)
        print(url)
        filename = os.path.join(args.output_path, f'{page_id}.jpg')
        error = download_image_url(url, filename)
        if error:
            errors.append([page_id, error])
            print(error)

        num_processed += 1
        if num_processed % args.save_state_freq == 0:
            save_state(ids_to_scrape, args.state)
            save_state(errors, args.errors)

        # we're decent people who just want some images
        time.sleep(args.sleep)

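
# The helpers used above (load_state, save_state, download_image_url) are not
# shown here; what follows is a minimal sketch of how they could look,
# assuming JSON files for the resumable state and the `requests` library for
# downloads. Names and signatures are assumptions, not the project's actual
# helpers.
import json
import os

import requests


def load_state(filename):
    """Return previously saved state, or None if there is nothing to resume."""
    if not os.path.exists(filename):
        return None
    with open(filename) as infile:
        return json.load(infile)


def save_state(state, filename):
    """Persist scraper state so an interrupted run can pick up where it left off."""
    with open(filename, 'w') as outfile:
        json.dump(state, outfile)


def download_image_url(url, filename):
    """Download one image; return an error string on failure, None on success."""
    try:
        response = requests.get(url, timeout=30)
        response.raise_for_status()
    except requests.RequestException as exc:
        return str(exc)
    with open(filename, 'wb') as outfile:
        outfile.write(response.content)
    return None
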
def on_train_end(self, steps, logs):
    """Outputs the final value for each metric."""
    output = dict(metrics=[
        dict(name=key_to_kf_name(key), numberValue=float(values[-1]))
        for key, values in logs.items()
        if not self.whitelist or key in self.whitelist
    ])
    dirname = os.path.dirname(self.args.metrics_path)
    if dirname:
        maybe_makedirs(dirname, exist_ok=True)
    with smart_open.open(self.args.metrics_path, 'w') as outfile:
        json.dump(output, outfile)

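
# For reference, the callback above writes a Kubeflow Pipelines metrics
# artifact. With logs = {'loss': [..., 0.42], 'fid': [..., 31.7]} the output
# JSON would look roughly like this (metric names and values are purely
# illustrative):
#
#   {"metrics": [{"name": "loss", "numberValue": 0.42},
#                {"name": "fid", "numberValue": 31.7}]}
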
def on_train_end(self, steps, logs):
    """Outputs the final value for each metric."""
    output = {
        key_to_kf_name(key): float(values[-1])
        for key, values in logs.items()
        if not self.whitelist or key in self.whitelist
    }
    config = configparser.ConfigParser()
    config['metrics'] = output
    dirname = os.path.dirname(self.args.metrics_path)
    if dirname:
        maybe_makedirs(dirname, exist_ok=True)
    with smart_open.open(self.args.metrics_path, 'w') as outfile:
        config.write(outfile)

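
# key_to_kf_name is referenced in both callbacks but not shown; a minimal
# sketch, assuming it only needs to turn log keys like 'val_g_loss' into
# lowercase, dash-separated names that Kubeflow accepts. This is an
# illustrative implementation, not necessarily the project's.
import re


def key_to_kf_name(key):
    """Lowercase the key and collapse runs of non-alphanumerics into dashes."""
    name = re.sub(r'[^a-z0-9]+', '-', key.lower())
    return name.strip('-')
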
def run(self):
    set_device_from_args(self.args)
    self.load_generator()

    path = []
    points = self.sample_z(self.args.num_points)
    for p_a, p_b in zip(points, torch.cat([points[1:], points[0:1]], dim=0)):
        # trim the final 1.0 value from the space:
        for i in np.linspace(0, 1, self.args.seg_frames + 1)[:-1]:
            path.append(slerp(i, p_a, p_b))
    path = torch.tensor(np.stack(path))

    imgs = self.g(path)
    if os.path.dirname(self.args.output_prefix):
        maybe_makedirs(os.path.dirname(self.args.output_prefix))
    for i, img in enumerate(imgs):
        filename = f"{self.args.output_prefix}_{i}.png"
        self.save_image(img, filename)

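
# slerp is referenced above but not defined here; this is a minimal sketch of
# spherical linear interpolation between two latent vectors, written for numpy
# arrays. The project's actual helper may differ in signature or framework.
import numpy as np


def slerp(t, a, b, eps=1e-7):
    """Interpolate a fraction t of the way along the arc from a to b."""
    a_flat, b_flat = np.ravel(a), np.ravel(b)
    cos_omega = np.dot(a_flat, b_flat) / (
        np.linalg.norm(a_flat) * np.linalg.norm(b_flat) + eps)
    omega = np.arccos(np.clip(cos_omega, -1.0, 1.0))
    so = np.sin(omega)
    if so < eps:
        # vectors are nearly parallel; fall back to linear interpolation
        return (1.0 - t) * a + t * b
    return np.sin((1.0 - t) * omega) / so * a + np.sin(t * omega) / so * b
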
def setup(self):
    set_device_from_args(self.args)
    self.load_generator(target=False)
    self.g = self.g.eval()
    self.load_disciminator()
    self.d = self.d.eval()

    img_size = self.g.max_size
    self.transform_input_image = transforms.Compose([
        transforms.Resize(img_size, interpolation=Image.LANCZOS),
        transforms.RandomCrop((img_size, img_size)),
        transforms.ToTensor(),
        lambda x: x * 2 - 1,
    ])
    if os.path.dirname(self.args.output_prefix):
        maybe_makedirs(os.path.dirname(self.args.output_prefix))

def run(self):
    set_device_from_args(self.args)
    self.load_generator()
    if os.path.dirname(self.args.output_prefix):
        maybe_makedirs(os.path.dirname(self.args.output_prefix))

    if self.args.tile:
        grid = self.unmirrored_tiled_grid(self.args.num_points,
                                          self.args.num_points)
    else:
        grid = self.sample_latent_grid(self.args.num_points,
                                       self.args.num_points)
    grid_width, grid_height = grid.shape[:2]

    # Rather than rendering the whole grid in one pass, rows are generated
    # lazily below and cached in grid_imgs:
    # grid_imgs = self.g(grid)
    # grid_imgs = grid_imgs.view(
    #     grid.shape[:2] + grid_imgs.shape[-3:]
    # )
    # grid_img_height, grid_img_width = grid_imgs.shape[-2:]
    grid_imgs = {}

    output_width, output_height = self.args.output_size, self.args.output_size
    output_img = torch.zeros(3, output_height, output_width)
    for y in range(output_height):
        print(f'Row {y}')
        grid_y = int(y * grid_height / output_height)
        if grid_y not in grid_imgs:
            row_imgs = self.g(grid[grid_y])
            grid_imgs[grid_y] = row_imgs
        else:
            row_imgs = grid_imgs[grid_y]
        grid_img_height, grid_img_width = row_imgs.shape[-2:]
        img_y = int(y * grid_img_height / output_height)
        for x in range(output_width):
            grid_x = int(x * grid_width / output_width)
            img_x = int(x * grid_img_width / output_width)
            output_img[:, y, x] = row_imgs[grid_x, :, img_y, img_x]
            # output_img[:, y, x] = grid_imgs[grid_x, grid_y, :, img_y, img_x]

    filename = f"{self.args.output_prefix}_combined.png"
    self.save_image(output_img, filename)

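
# sample_latent_grid and unmirrored_tiled_grid are not shown. As a rough
# sketch of one plausible sample_latent_grid, the version below bilinearly
# interpolates between four randomly sampled corner latents to give a
# (rows, cols, z_dim) grid; this is an assumption about its behavior, not the
# project's actual implementation.
import torch


def sample_latent_grid_sketch(sample_z, rows, cols):
    """Return a (rows, cols, z_dim) grid blending four corner latent codes."""
    tl, tr, bl, br = sample_z(4)  # corners: top-left, top-right, bottom-left, bottom-right
    ys = torch.linspace(0, 1, rows)[:, None, None]
    xs = torch.linspace(0, 1, cols)[None, :, None]
    top = (1 - xs) * tl + xs * tr
    bottom = (1 - xs) * bl + xs * br
    return (1 - ys) * top + ys * bottom
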
def on_train_begin(self, steps, logs):
    maybe_makedirs(os.path.dirname(self.sample_root + '/'), exist_ok=True)
    # draw a fixed set of latent codes up front for progress samples
    self.progress_samples = self.trainer.sample_z(32)

def run(self):
    set_device_from_args(self.args)
    self.load_generator()
    self.setup_feature_extractor()
    self.g = self.g.eval()
    self.g.requires_grad_(False)
    if os.path.dirname(self.args.output_prefix):
        maybe_makedirs(os.path.dirname(self.args.output_prefix))

    target_img = Image.open(self.args.target_image)
    target_img = self.transform_rgb_to_vgg(target_img)
    all_target_feats = []
    with torch.no_grad():
        for feature_extractor in self.feature_extractors:
            feats = feature_extractor(target_img[None, ...])
            all_target_feats.append(
                feats.repeat(self.args.num_samples, 1, 1, 1))

    # optimize z
    recon_loss = dict(mse=nn.MSELoss(reduction='sum'),
                      l1=nn.SmoothL1Loss(reduction='sum'))[self.args.loss]
    z = self.sample_z(self.args.num_samples)
    z.requires_grad_(True)
    target_imgs = target_img.repeat(self.args.num_samples, 1, 1, 1)
    opt_class = {
        'adam': optim.Adam,
        'sgd': optim.SGD,
        'lbfgs': optim.LBFGS,
    }[self.args.optimizer]
    optimizer = opt_class([z], self.args.lr)
    tqdm_iter = tqdm.tqdm(range(self.args.max_steps))

    def train_step():
        # closure passed to optimizer.step(); reads the loop variable `i`
        # from the enclosing scope when saving progress images
        optimizer.zero_grad()
        imgs = self.g(z)
        vgg_imgs = torch.stack(list(map(self.transform_tanh_to_vgg, imgs)))
        if self.args.vgg:
            loss = 0.
            for feature_extractor, target_feats in zip(
                    self.feature_extractors, all_target_feats):
                img_feats = feature_extractor(vgg_imgs)
                loss = loss + recon_loss(img_feats, target_feats)
        else:
            loss = recon_loss(vgg_imgs, target_imgs)
        # L2 regularization of latent code
        loss += z.pow(2).mean() * self.args.l2
        loss.backward()
        self.save_image(imgs, f'{self.args.output_prefix}_{i}.png')
        return loss

    for i in tqdm_iter:
        # stochastic clipping https://openreview.net/pdf?id=HJC88BzFl
        with torch.no_grad():
            should_clip = (torch.gt(z, 3) | torch.lt(z, -3)).float()
            clip_noise = torch.randn_like(z)
            z -= z * should_clip  # zero out the clipped values
            z += clip_noise * should_clip  # and resample them from a unit Gaussian
        loss = optimizer.step(train_step)
        z_min, z_mean, z_max = float(z.min()), float(z.mean()), float(z.max())
        tqdm_iter.set_postfix(loss=float(loss),
                              z_min=z_min,
                              z_mean=z_mean,
                              z_max=z_max)

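
# transform_tanh_to_vgg is referenced above but not shown; a minimal sketch,
# assuming it maps generator output from the tanh range [-1, 1] back to
# [0, 1] and then applies the standard ImageNet normalization that
# torchvision's VGG models expect. The project's actual transform may differ.
from torchvision import transforms

transform_tanh_to_vgg_sketch = transforms.Compose([
    lambda x: (x + 1) / 2,  # tanh range [-1, 1] -> [0, 1]
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])
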
def save_cache(self, filename):
    if os.path.dirname(filename):
        maybe_makedirs(os.path.dirname(filename), exist_ok=True)
    with open(filename, 'wb') as outfile:
        pickle.dump(self._image_cache, outfile)

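
# A matching load_cache is the natural counterpart to save_cache; this is a
# sketch assuming the same pickle format and the same os/pickle imports as
# above, not necessarily code that exists in the project.
def load_cache(self, filename):
    """Restore the image cache written by save_cache, if the file exists."""
    if not os.path.exists(filename):
        return
    with open(filename, 'rb') as infile:
        self._image_cache = pickle.load(infile)
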