def main():
    from config import load_args

    # Parse the configuration and hand it to the data loader.
    args = load_args()
    load_data(args)


# if __name__ == '__main__':
#     main()
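For reference, `load_args` here presumably wraps an argument parser in `config.py`; a minimal sketch under that assumption (the flags below are placeholders, and later examples even pass a JSON path to `load_args` instead):

# Hypothetical sketch only; the real config.py is not shown in these examples.
import argparse

def load_args():
    parser = argparse.ArgumentParser(description="example configuration")
    # Placeholder flags, not the project's actual options.
    parser.add_argument("--data_dir", type=str, default="data/")
    parser.add_argument("--lr", type=float, default=1e-3)
    return parser.parse_args()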
Example #2
def main():
    from config import load_args
    m = Model(load_args())

    # Freeze every parameter in the model.
    for param in m.parameters():
        param.requires_grad = False

    # Confirm that nothing is left trainable.
    for name, param in m.named_parameters():
        print(name, param.requires_grad)
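A common variant of the freezing pattern above keeps one submodule trainable; a minimal sketch, assuming the model exposes a hypothetical `classifier` head (not a name taken from this example):

# Freeze the backbone, keep only a hypothetical `classifier` head trainable.
for name, param in m.named_parameters():
    param.requires_grad = name.startswith("classifier")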
Example #3
import tensorflow as tf
import numpy as np

from config import load_args
from lip_model.losses import cer
from lip_model.modules import embedding, sinusoid_encoding, multihead_attention, \
  feedforward, label_smoothing
from lip_model.visual_frontend import VisualFrontend
from util.tf_util import shape_list

config = load_args()


class TransformerTrainGraph:
    def __init__(self,
                 x,
                 y,
                 is_training=True,
                 reuse=None,
                 embed_input=False,
                 go_token_index=2,
                 chars=None):

        self.is_training = is_training
        self.x = x

        if config.featurizer:
            # The video tensor may arrive on its own or as the first element
            # of a (video, ...) tuple/list.
            vid_inp = x[0] if isinstance(x, (tuple, list)) else x
            # Frames that are entirely zero are padding; mask them out.
            istarget = tf.not_equal(vid_inp, 0)
            self.padding_mask = tf.to_float(
                tf.reduce_any(istarget, axis=[2, 3, 4]))
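To see what that mask computes, here is a small NumPy check with dummy data; the [batch, time, H, W, C] shape is an assumption about the video input, and all-zero frames count as padding:

import numpy as np

vid = np.zeros((2, 4, 8, 8, 3), dtype=np.float32)  # [batch, time, H, W, C]
vid[0, :3] = 1.0  # first clip: 3 real frames, 1 padded frame
vid[1, :2] = 1.0  # second clip: 2 real frames, 2 padded frames
mask = np.any(vid != 0, axis=(2, 3, 4)).astype(np.float32)
print(mask)  # [[1. 1. 1. 0.]
             #  [1. 1. 0. 0.]]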
Example #4
    discriminator = Discriminator(
        from_rgb_activate=not args.no_from_rgb_activate)
    g_running = StyleGAN()
    if args.cuda:
        generator = generator.cuda()
        discriminator = discriminator.cuda()
        g_running = g_running.cuda()
    # g_running is kept in eval mode; it is updated through accumulate()
    # below rather than by backpropagation.
    g_running.train(False)

    g_optimizer = optim.Adam(generator.generator.parameters(),
                             lr=args.lr,
                             betas=(0., 0.99))
    # The style mapping network trains with a 100x smaller learning rate.
    g_optimizer.add_param_group({
        'params': generator.style.parameters(),
        'lr': args.lr * 0.01,
        'mult': 0.01
    })

    d_optimizer = optim.Adam(discriminator.parameters(),
                             lr=args.lr,
                             betas=(0., 0.99))

    accumulate(g_running, generator, 0)

    train(args, train_loader, generator, g_running, discriminator, g_optimizer,
          d_optimizer)


if __name__ == '__main__':
    args = load_args()
    main(args)
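`accumulate` is not defined in this snippet; in StyleGAN-style training loops such a helper usually maintains `g_running` as an exponential moving average of the generator, and calling it with decay 0 copies the weights outright. A sketch of that assumed behaviour, not the file's actual definition:

def accumulate(model_ema, model, decay=0.999):
    # Blend the EMA copy toward the live generator's parameters;
    # decay=0 simply copies the weights.
    ema_params = dict(model_ema.named_parameters())
    for name, param in model.named_parameters():
        ema_params[name].data.mul_(decay).add_(param.data, alpha=1 - decay)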
Example #5
    checkpoint = torch.load(f)

state_dict = canonical_state_dict_keys(checkpoint['state_dict'])
model.load_state_dict(state_dict)
logger.info(f"Finished loading ckpt in {time.time() - tic:.3f}s")

logger.info(f"CUDA device count: {torch.cuda.device_count()}")
device_count = torch.cuda.device_count()
models = []
for device_ind in range(device_count):
    device = f"cuda:{device_ind}"
    models.append(copy.deepcopy(model).to(device))
    models[device_ind].eval()

from config import load_args
configdl = load_args()
graph_dict = {
    'train': TransformerTrainGraph,
    'infer': TransformerInferenceGraph,
}


def init_models_and_data(istrain):

    print('Loading data generators')
    val_gen, val_epoch_size = setup_generators()

    # Pin TensorFlow to the configured GPU and let it grow memory on demand.
    os.environ["CUDA_VISIBLE_DEVICES"] = str(configdl.gpu_id)
    gpu_options = tf.GPUOptions(allow_growth=True)
    sess_config = tf.ConfigProto(gpu_options=gpu_options)
    sess = tf.Session(config=sess_config)
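The per-GPU replicas built above are typically used by spreading batches across them; a minimal usage sketch, where `batches` and the call signature are assumptions rather than code from this example:

# Hypothetical round-robin dispatch over the per-GPU replicas.
for batch_ind, batch in enumerate(batches):
    device_ind = batch_ind % device_count
    with torch.no_grad():
        out = models[device_ind](batch.to(f"cuda:{device_ind}"))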
Example #6
def main():
    args = load_args()
    load_data(args)
Example #7
        im = PIL.Image.open(images[ii])
        im.thumbnail((300, 300))
        grid.paste(im, (0, 0))

        # Tile images from `closest` into the remaining 300x300 grid cells.
        index = 0
        for i in range(300, 900, 300):
            for j in range(0, 900, 300):
                im = PIL.Image.open(closest[index])
                im.thumbnail((300, 300))
                grid.paste(im, (i, j))
                index += 1

        grid.save(
            f"{dataset_folder}/grids/{images[ii].split('/')[-1].split('.')[0]}.png"
        )


# generate_grids()

args = config.load_args("config/sim.json")

for ii, main_im in enumerate(tqdm(images)):
    # Style-transfer the main image with each of its closest neighbours...
    for imfile in closest[ii]:
        args.style = f"{main_im},{imfile}"
        # print(args.style)
        style.img_img(args)
    # ...and with every unordered pair of neighbours.
    for imfiles in itertools.combinations(closest[ii], 2):
        args.style = f"{main_im},{','.join(imfiles)}"
        style.img_img(args)
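For reference, `itertools.combinations(closest[ii], 2)` yields every unordered pair of neighbours, so each pair is styled exactly once:

import itertools

# Quick illustration of the pair generation used above.
print(list(itertools.combinations(["a.png", "b.png", "c.png"], 2)))
# [('a.png', 'b.png'), ('a.png', 'c.png'), ('b.png', 'c.png')]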
Example #8
        }
    else:
        ydl_opts = {
            'outtmpl': download_location,
            'quiet': 'true',
            'format': 'bestaudio/best',
        }
    # Download the requested link with the options chosen above.
    with youtube_dl.YoutubeDL(ydl_opts) as ydl:
        ydl.download([link])
        print(download_location)


if __name__ == '__main__':
    # Load arguments
    args = config.load_args()
    # Note: this rebinds the name `config` from the module to the loaded
    # configuration object.
    config = config.load_config()
    # If custom location is set, use it
    if args.output:
        output = args.output
    # If no custom location is set: use project directory
    else:
        output = prog_path
    # If ID is given, try to download the specific ID, error if something goes wrong
    if args.ID:
        download_set(args.ID)
    # If a list of IDs is provided, loop through the file and try to download each set ID
    if args.list:
        try:
            f = open(args.list, "r")
            for line in f:
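The snippet is cut off inside the ID-list branch; one way to finish it, shown here only as a sketch built on the visible code (the stripping and error handling are assumptions):

# Hypothetical completion of the truncated --list branch.
if args.list:
    try:
        with open(args.list, "r") as f:
            for line in f:
                set_id = line.strip()
                if set_id:
                    download_set(set_id)
    except OSError as err:
        print(f"Could not read {args.list}: {err}")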
Example #9
    growth_factor = math.sqrt(math.sqrt(2))
if autoscale_args.precision == "high":
    growth_factor = math.sqrt(math.sqrt(math.sqrt(2)))

min_size = autoscale_args.min_size

print("\n\n\n\nmodels:", mods)
print("optimizers:", opts)
print("#GPUs:", num_gpus)
print("starting from size:", min_size)
print("scaling with factor:", growth_factor, "\n\n\n")

def im(size):
    # Random RGB image tensor of the requested (rounded) size, values in [0, 255).
    side = int(round(size))
    return torch.rand(size=(1, 3, side, side)) * 255

args = config.load_args("config/img_img.json")
args.print_iter = -1
args.save_iter = -1

max_sizes = {}

for mod, opt, gpus in [(mod, opt, gpus) for gpus in range(1, num_gpus + 1)
                       for opt in opts for mod in mods]:
    conf = f"{mod}+{opt}+{gpus}"
    print(f"\nmodel: {mod.upper()}   optimizer: {opt.upper()}   #GPUs: {gpus}")
    max_sizes[conf] = {}

    if opt == "lbfgs" and gpus == 1:
        size = min_size / growth_factor
    else:
        # Start from at least the size reached by the previous, more
        # memory-hungry configuration.
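The nested square roots above are just fractional powers of two, which makes the two precision levels easier to compare:

import math

# Equivalent, more explicit forms of the growth factors used above.
assert math.isclose(math.sqrt(math.sqrt(2)), 2 ** 0.25)
assert math.isclose(math.sqrt(math.sqrt(math.sqrt(2))), 2 ** 0.125)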
Example #10
def main():
    print('~~~ Starting dimension estimation! ~~~')

    # load config file
    args = config.load_args()

    # make output folder if it doesn't exist already
    os.makedirs(args.save_dir, exist_ok=True)

    # save args to json file
    with open(os.path.join(args.save_dir, 'commandline_args.txt'), 'w') as file2:
        json.dump(args.__dict__, file2, indent=2)

    # get model
    print(' > Loading model...')
    model = get_model(args)
    device = args.device
    model.cuda(device)
    model.eval()

    # get dataset
    print(' > Preparing dataset...')
    dataloader = get_dataloader(args)

    # ViT models need a patch size, which is parsed from the model name
    if args.model.split('_')[0] == 'vit':
        patch_size = int(args.model.split('_')[-2][-2:])

    # create dict with n_factor lists and factor list
    factor_list = []
    output_dict = {'example1': [], 'example2': []}

    print(' > Processing starting...')
    # run inference in a loop and store the outputs as numpy arrays
    for i, (factor, example1, example2, _, _) in enumerate(dataloader):

        # move data to GPU
        example1, example2 = example1.cuda(device), example2.cuda(device)

        # pass images through model and get distribution mean
        if args.model.split('_')[0] == 'vit':
            output1 = model(example1, patch=patch_size).mode()[0]
            output2 = model(example2, patch=patch_size).mode()[0]
        else:
            output1 = model(example1).mode()[0]
            output2 = model(example2).mode()[0]

        # add factor and output to list / array for processing dimensions later on
        factor_list.append(factor.detach().cpu().numpy())
        output_dict['example1'].append(output1.detach().cpu().numpy())
        output_dict['example2'].append(output2.detach().cpu().numpy())

        if i % 10000 == 0:
            print('Processing example {}/{}'.format(i, len(dataloader)))

    print(' > Finished processing examples...')
    print(' > Starting Dimensionality Estimation!')

    # dimensionality estimation
    dims, dims_percent = dim_est(output_dict, factor_list, args)

    print(" >>> Estimated factor dimensionalities: {}".format(dims))
    print(" >>> Ratio to total dimensions: {}".format(dims_percent))

    print('Saving results to {}'.format(args.save_dir))

    # save to output folder
    with open(os.path.join(args.save_dir, args.model + '_dim_est.csv'),
              mode='w') as file1:
        writer = csv.writer(file1, delimiter=',')
        writer.writerow(dims)
        writer.writerow(dims_percent)
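To inspect the results later, the two rows written above can be read back with the csv module; a minimal sketch that mirrors the path used when writing:

import csv
import os

# Read back the two rows: estimated dimensionalities and their ratio
# to the total dimensionality.
with open(os.path.join(args.save_dir, args.model + '_dim_est.csv')) as file1:
    dims_row, percent_row = list(csv.reader(file1))
print(dims_row, percent_row)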