Example #1
    def download_google_drive_file(self):
        """Download the paper's dataset from Google Drive and return a dict
        mapping each mode ('train'/'test') to its local path."""
        paths = {}
        for mode in ['train', 'test']:
            # Files may be keyed by a single length or by a min-max length
            # range, so build both candidate prefixes.
            candidates = []
            candidates.append('{}{}_{}'.format(self.task, self.max_length,
                                               mode))
            candidates.append('{}{}-{}_{}'.format(self.task, self.min_length,
                                                  self.max_length, mode))

            for key in candidates:
                for search_key in GOOGLE_DRIVE_IDS.keys():
                    if search_key.startswith(key):
                        path = os.path.join(self.data_dir, search_key)
                        tf.logging.info(
                            "Downloading the paper's dataset to {}".format(path))

                        if not os.path.exists(path):
                            download_file_from_google_drive(
                                GOOGLE_DRIVE_IDS[search_key], path)
                            if path.endswith('.zip'):
                                with zipfile.ZipFile(path, 'r') as z:
                                    z.extractall(self.data_dir)
                        paths[mode] = path

        if not paths:
            # Warn only when no matching Drive entry was found.
            tf.logging.info("Couldn't find the paper's dataset!")
        return paths
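Both this method and the examples below assume a download_file_from_google_drive(file_id, destination) helper. For context, here is a minimal sketch following the widely used requests-based recipe (the confirm-token step handles Drive's virus-scan warning page for large files); the repos' actual helpers may differ:

import requests

def download_file_from_google_drive(file_id, destination):
    URL = "https://docs.google.com/uc?export=download"
    session = requests.Session()
    response = session.get(URL, params={'id': file_id}, stream=True)

    # Large files return an interstitial warning page; grab the confirm
    # token from the cookies and retry with it.
    token = None
    for key, value in response.cookies.items():
        if key.startswith('download_warning'):
            token = value
    if token:
        response = session.get(URL, params={'id': file_id, 'confirm': token},
                               stream=True)

    # Stream the payload to disk in 32 KB chunks.
    with open(destination, 'wb') as f:
        for chunk in response.iter_content(32768):
            if chunk:  # skip keep-alive chunks
                f.write(chunk)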
Example #2
def get_model():
    # Fetch the ONNX checkpoint from Google Drive on first use.
    path2model = os.path.join("models", "deeplabv3_resnet50.onnx")
    if not os.path.exists(path2model):
        os.makedirs("models", exist_ok=True)  # make sure the target dir exists
        file_id = '1OTQSLxy4Yn-ZTrB7EIFQlVWDKcQsEbZB'
        download_file_from_google_drive(file_id, path2model)
    inference = SegmentationInference(path2model)
    return inference
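SegmentationInference is project-specific and not shown on this page. A minimal sketch of what such a wrapper might look like, assuming onnxruntime (the class body, input layout, and argmax postprocessing are assumptions, not the repo's code):

import numpy as np
import onnxruntime as ort

class SegmentationInference:
    """Hypothetical minimal wrapper around an ONNX segmentation model."""

    def __init__(self, model_path):
        self.session = ort.InferenceSession(model_path)
        self.input_name = self.session.get_inputs()[0].name

    def __call__(self, image):
        # image: float32 array of shape (1, 3, H, W), already normalized
        logits = self.session.run(None, {self.input_name: image})[0]
        return np.argmax(logits, axis=1)  # (1, H, W) per-pixel class ids

With a wrapper like this, get_model() returns a callable that maps a preprocessed image batch to a segmentation mask.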
Example #4
args.niter = 1000
args.batch_size = 2
args.n_eval_samples = 5000
datasets = ['ffhq', 'cat', 'horse', 'church', 'car']
args.dataset = 'car'
args.path = '/content/results'

args.base_exp_name = 'car_1000_iter_lr_00001'
args.size = 1024 if args.dataset == 'ffhq' else 256
args.checkpoint = 'stylegan2-%s-config-f.pt' % args.dataset

import os  # needed for the checkpoint existence check below
from download import download_file_from_google_drive, gdrive_map
if not os.path.isfile(args.checkpoint):
    print('...Downloading checkpoint...')
    file_id = gdrive_map[args.dataset]
    download_file_from_google_drive(file_id, args.checkpoint)
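# For reference, gdrive_map above is presumably a dict in download.py that
# maps dataset names to Drive file IDs, roughly:
#   gdrive_map = {'ffhq': '<file-id>', 'cat': '<file-id>', ...}  # placeholders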

args.channel_multiplier = 2
args.latent = 512
args.n_mlp = 8
args.device = 'cuda'

## TESTING
methods = ['TTTz', 'TTTw', 'TNet', 'TNet+TTT']
# NOTE: after TTTz finishes, you'll need to restart the runtime.
# I can't figure out why it crashes, but once you restart it should be fine.
# Just uncomment the next line:
# methods = ['TTTw', 'TNet', 'TNet+TTT']
architectures = ['prelu', 'a', 'b', 'c', 'd', 'e', 'f']
for m in methods:
    if m in ['TTTz', 'TTTw']: