    eval_loss = problem.loss(problem.variables, 'validation')
    test_loss = problem.loss(problem.variables, 'test')
    problem_batches, reset_limits = problems.create_batches_all()
    config_args = config.mlp_norm_history()
    reset_epoch_ext = 20000 / config_args['unroll_len']
    #########################
    epochs = int(1000000 / config_args['unroll_len'])
    epoch_interval = int(500 / config_args['unroll_len'])
    eval_interval = int(50000 / config_args['unroll_len'])
    validation_epochs = int(10000 / config_args['unroll_len'])
    #########################

    num_unrolls_per_epoch = 1
    io_path = util.get_model_path(
        flag_optimizer=flag_optimizer,
        model_id=model_id) if restore_network else None
    optim = meta_optimizers.AUGOptims(problem_batches, [],
                                      path=io_path,
                                      args=config.aug_optim())
    optim.build()

    optim_grad = tf.gradients(optim.ops_loss, optim.optimizer_variables)
    optim_grad_norm = [tf.norm(grad) for grad in optim_grad]
    optim_norm = [tf.norm(variable) for variable in optim.optimizer_variables]
    # norm_grads = [tf.norm(gradients) for gradients in optim.problems.get_gradients()]
    problem_norms = []
    for problem in optim.problems:
        norm = 0
        for variable in problem.variables:
            norm += tf.norm(variable)
        problem_norms.append(norm)
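    # Hedged sketch (not from the original snippet): in this TF1-style code the norm
    # tensors above would typically be evaluated inside a session, e.g.
    #   with tf.Session() as sess:
    #       sess.run(tf.global_variables_initializer())
    #       grad_norms, var_norms, prob_norms = sess.run(
    #           [optim_grad_norm, optim_norm, problem_norms])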
Example #2
results_dir = 'tf_summary/'
model_id = '50000'

load_model = True
meta = False
optimize = False

l2l = tf.Graph()
with l2l.as_default():
    args = config.aug_optim()
    epochs = 50
    total_data_points = 55000
    batch_size = 128
    itr_per_epoch = int(total_data_points / batch_size)
    io_path = util.get_model_path(flag_optimizer='Mlp', model_id=model_id)
    all_summ = []
    writer = None
    problem = problems.Mnist({
        'minval': -100.0,
        'maxval': 100.0,
        'conv': False,
        'full': False
    })
    # problem = problems.cifar10({'minval': -100.0, 'maxval': 100.0, 'conv': True, 'path': '../../../cifar/', 'full': False})
    loss = problem.loss(problem.variables)
    acc_train = problem.accuracy(mode='train')
    acc_test = []  # problem.accuracy(mode='test')
    enable_summaries = False
    if meta:
        optim_meta = meta_optimizers.AUGOptims([problem], [], args=args)
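        # Hedged sketch (assumption, mirroring the first example): when load_model is
        # True, the checkpoint path would presumably be passed to the meta-optimizer
        # and the graph built before use, e.g.
        #   optim_meta = meta_optimizers.AUGOptims([problem], [], path=io_path, args=args)
        #   optim_meta.build()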
Example #3
import json
import time
import os
import numpy as np
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Dropout, GlobalMaxPooling1D
from keras.layers import Conv1D, MaxPooling1D, Embedding
from keras.callbacks import ModelCheckpoint
from keras.layers.normalization import BatchNormalization
from keras import Sequential
import util


BASE_PATH = os.path.dirname(os.path.realpath(__file__))
MODEL_NAME = 'meme_text_gen'
MODEL_PATH = util.get_model_path(BASE_PATH, MODEL_NAME)
os.mkdir(MODEL_PATH)


SEQUENCE_LENGTH = 128
EMBEDDING_DIM = 16
ROWS_TO_SCAN = 2000000
NUM_EPOCHS = 48
BATCH_SIZE = 256


print('loading json data...')
t = time.time()

with open(BASE_PATH + '/training_data_sample.json') as f:
    training_data = json.load(f)
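
# Hedged sketch (not part of the original snippet): the imports above suggest a 1-D
# convolutional text model roughly along these lines; `vocab_size` and the layer
# sizes are illustrative assumptions, not values from the source.
#   model = Sequential([
#       Embedding(vocab_size, EMBEDDING_DIM, input_length=SEQUENCE_LENGTH),
#       Conv1D(128, 5, activation='relu'),
#       BatchNormalization(),
#       MaxPooling1D(2),
#       Conv1D(128, 5, activation='relu'),
#       GlobalMaxPooling1D(),
#       Dropout(0.2),
#       Dense(vocab_size, activation='softmax'),
#   ])
#   model.compile(loss='sparse_categorical_crossentropy', optimizer='adam')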
Example #4
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
import util

fig_folder = 'fig'
flag_optimizer = 'MLP'
model_id = '1000000'
model_id += '_FINAL'
model_path = util.get_model_path(flag_optimizer=flag_optimizer,
                                 model_id=model_id)
save_path = model_path + '_fig_'
debug_files = [model_path + '_optim_io.txt']


def plot3d(x, y, z, labels):
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(x, y, z)
    ax.set_xlabel(labels[0])
    ax.set_ylabel(labels[1])
    ax.set_zlabel(labels[2])
    path = save_path + labels[0] + '_' + labels[1] + '_' + labels[2]
    plt.savefig(path + '.png')
    plt.show()


def plot2d(x, y, labels):
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.scatter(x, y)
    ax.set_xlabel(labels[0])
    ax.set_ylabel(labels[1])
    plt.savefig(save_path + labels[0] + '_' + labels[1] + '.png')
    plt.show()
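

# Hypothetical usage (assumption; the columns actually stored in the *_optim_io.txt
# debug files are not shown in this snippet):
#   data = np.loadtxt(debug_files[0])
#   plot2d(data[:, 0], data[:, 1], ['step', 'loss'])
#   plot3d(data[:, 0], data[:, 1], data[:, 2], ['step', 'grad_norm', 'update_norm'])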
Example #5
def main():
    """Create the model and start the training."""

    w, h = args.input_size.split(",")
    input_size = (int(w), int(h))

    cudnn.enabled = True
    best_result = {
        "miou": 0,
        "miou_t": 0,
        "miou_s": 0,
        "iter": 0,
        "lr": args.learning_rate
    }
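    # Tracks the best results so far: "miou" from the weak-set evaluation, and
    # "miou_s"/"miou_t" for the seen vs. target (unseen) classes in the GZSL
    # evaluation further below.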

    # Create network
    if args.restore_from_where == "pretrained":
        model = Our_Model(split)

        i_iter = 0
    elif args.restore_from_where == "saved":
        restore_from = get_model_path(PRETRAINED_OUR_PATH)
        model_restore_from = restore_from["model"]
        i_iter = 0

        model = Our_Model(split)
        saved_state_dict = torch.load(model_restore_from)
        model.load_state_dict(saved_state_dict)

    elif args.restore_from_where == "continue":
        restore_from = get_model_path(args.snapshot_dir)
        model_restore_from = restore_from["model"]
        i_iter = restore_from["step"]

        model = Our_Model(split)
        saved_state_dict = torch.load(model_restore_from)
        model.load_state_dict(saved_state_dict)

    cudnn.benchmark = True

    # init

    model.train()
    model.to(device)

    if not os.path.exists(args.snapshot_dir):
        os.makedirs(args.snapshot_dir)

    train_strong_loader = prepare_for_train_pixel_dataloader(
        dataroot=args.dataroot,
        bs_train=args.batch_size,
        input_size=input_size,
        shuffle=True,
        split=split)
    train_weak_loader = prepare_for_train_weak_pixel_dataloader(
        dataroot=args.dataroot,
        bs_train=weak_size,
        input_size=input_size,
        shuffle=True,
        split=split)
    test_weak_loader = prepare_for_val_weak_pixel_dataloader(
        dataroot=args.dataroot,
        bs_val=1,
        input_size=input_size,
        shuffle=False,
        split=split)
    test_loader = prepare_for_val_pixel_dataloader(dataroot=args.dataroot,
                                                   bs_val=1,
                                                   input_size=input_size,
                                                   shuffle=False,
                                                   split=split)

    data_len = len(train_strong_loader)
    num_steps = data_len * args.num_epochs

    optimizer = optim.SGD(model.optim_parameters_1x(args),
                          lr=args.learning_rate,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)
    optimizer.zero_grad()

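    # Second optimizer for the parameter group trained at 10x the base learning rate
    # (a common backbone vs. head split in segmentation models).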
    optimizer_10x = optim.SGD(model.optim_parameters_10x(args),
                              lr=10 * args.learning_rate,
                              momentum=args.momentum,
                              weight_decay=args.weight_decay)
    optimizer_10x.zero_grad()

    seg_loss = nn.CrossEntropyLoss(ignore_index=255)

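    # Upsample network outputs back to the input resolution before computing the loss.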
    interp = nn.Upsample(size=(input_size[1], input_size[0]),
                         mode="bilinear",
                         align_corners=True)

    with open(RESULT_DIR, "a") as f:
        f.write(SNAPSHOT_PATH.split("/")[-1] + "\n")
    for epoch in range(args.num_epochs):
        train_strong_iter = enumerate(train_strong_loader)
        train_weak_iter = enumerate(train_weak_loader)

        model.train()
        for i in range(data_len):

            loss_pixel = 0
            loss_pixel_value = 0

            optimizer.zero_grad()
            adjust_learning_rate(optimizer, i_iter, num_steps, times=1)

            optimizer_10x.zero_grad()
            adjust_learning_rate(optimizer_10x, i_iter, num_steps, times=10)

            # train strong
            try:
                _, batch = train_strong_iter.__next__()
            except StopIteration:
                train_strong_iter = enumerate(train_strong_loader)
                _, batch = train_strong_iter.__next__()

            images, masks = batch["image"], batch["label"]
            images = images.to(device)
            masks = masks.long().to(device)
            pred = model(images, "all")

            pred = interp(pred)
            loss_pixel = seg_loss(pred, masks)
            loss = loss_pixel

            loss.backward()
            loss_pixel_value += loss.item()

            # train weak
            if epoch >= 0:
                try:
                    _, batch = train_weak_iter.__next__()
                except StopIteration:
                    train_weak_iter = enumerate(train_weak_loader)
                    _, batch = train_weak_iter.__next__()

                images, masks = batch["image"], batch["label"]
                images = images.to(device)
                masks = masks.long().to(device)
                pred = model(images, "all")

                pred = interp(pred)
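                # Derive pseudo-labels for the weakly-labeled batch from the current
                # prediction and the weak ground truth.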
                pseudo = get_pseudo(pred, masks)

                loss = seg_loss(pred, pseudo) * weak_proportion * lambdaa
                loss.backward()

            optimizer.step()
            optimizer_10x.step()

            print("iter = {0:8d}/{1:8d},  loss_pixel = {2:.3f}".format(
                i_iter, num_steps, loss))

            # save model with max miou
            if i_iter % args.save_pred_every == 0 and i_iter != best_result[
                    "iter"]:
                hist = np.zeros((5, 5))
                model.eval()
                for index, batch in enumerate(test_weak_loader):
                    if index % 10 == 0:
                        print("\r", index, end="")

                    images, labels, size = batch["image"], batch[
                        "label"], batch["size"]
                    w, h = list(map(int, size[0].split(",")))
                    interp_val = nn.Upsample(size=(h, w),
                                             mode="bilinear",
                                             align_corners=True)

                    images = images.to(device)
                    labels = relabel(labels).numpy()
                    # labels = labels.numpy()
                    pred = model(images, "weak")
                    pred = interp_val(pred)

                    pred = pred[0].permute(1, 2, 0)
                    pred = torch.max(pred, 2)[1].byte()
                    pred_cpu = pred.data.cpu().numpy()
                    hist += fast_hist(labels.flatten(), pred_cpu.flatten(), 5)

                mIoUs = per_class_iu(hist)
                print(mIoUs)
                mIoU = round(np.nanmean(mIoUs) * 100, 2)
                print(mIoU)

                # gzsl
                hist_g = np.zeros((20, 20))
                for index, batch in enumerate(test_loader):
                    if index % 10 == 0:
                        print("\r", index, end="")

                    images, labels, size = batch["image"], batch[
                        "label"], batch["size"]
                    w, h = list(map(int, size[0].split(",")))
                    interp_val = nn.Upsample(size=(h, w),
                                             mode="bilinear",
                                             align_corners=True)

                    images = images.to(device)
                    labels = labels.numpy()
                    pred = model(images, "all")
                    pred = interp_val(pred)

                    pred = pred[0].permute(1, 2, 0)
                    pred = torch.max(pred, 2)[1].byte()
                    pred_cpu = pred.data.cpu().numpy()
                    hist_g += fast_hist(labels.flatten(), pred_cpu.flatten(),
                                        20)

                mIoUs_g = per_class_iu(hist_g)
                print(mIoUs_g)
                mIoU_t = round(np.nanmean(mIoUs_g[15:]) * 100, 2)
                mIoU_s = round(np.nanmean(mIoUs_g[:15]) * 100, 2)
                print(mIoU_s)
                print(mIoU_t)

                if mIoU_t > best_result["miou_t"]:
                    print("taking snapshot ...")
                    torch.save(
                        model.state_dict(),
                        osp.join(args.snapshot_dir,
                                 str(i_iter) + "_model.pth"))
                    delete_superfluous_model(args.snapshot_dir, 1)

                    best_result = {
                        "miou_s": mIoU_s,
                        "miou_t": mIoU_t,
                        "miou": mIoU,
                        "iter": i_iter
                    }
                with open(RESULT_DIR, "a") as f:
                    f.write(
                        "i_iter:{:d}\tmiou:{:0.5f}\tmiou_s:{:0.5f}\tmiou_t:{:0.5f}\tbest_result:{}\n"
                        .format(i_iter, mIoU, mIoU_s, mIoU_t, best_result))

                # restore training mode after evaluation
                model.train()

            i_iter += 1
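

# Hedged assumption (not shown in this snippet): the script presumably calls main()
# through the standard entry-point guard:
# if __name__ == "__main__":
#     main()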
Example #6
def setup_object(obj_idx,
                 class_idx,
                 sample_idx,
                 reset_obj=True,
                 total_num_samples=60000,
                 n_buckets=12,
                 random_scaling=0,
                 random_viewpoint=0,
                 random_warp=.1):
    """Load object model, update its material, and place appropriately."""
    global cached_obj_scale, obj_ref

    if reset_obj:
        # Distribute models across sample indices (for clean train/valid splits)
        per_bucket = util.n_models[class_idx] // n_buckets
        bucket_choice = sample_idx // (total_num_samples // n_buckets)
        min_idx = bucket_choice * per_bucket

        # Load object
        obj_path = None
        while obj_path is None:
            try:
                model_idx = np.random.randint(per_bucket) + min_idx
                obj_path = util.get_model_path(class_idx, model_idx)
                bpy.ops.import_scene.obj(filepath=obj_path)
            except Exception:
                obj_path = None

        obj = C.selected_objects[0]
        obj.pass_index = obj_idx + 1
        C.view_layer.objects.active = obj
        obj_ref[obj_idx] = obj

        # Remap UV coordinates
        bpy.ops.object.editmode_toggle()
        bpy.ops.uv.cube_project()
        bpy.ops.object.editmode_toggle()

        # Assign material
        for i in range(len(C.object.material_slots)):
            C.object.active_material_index = i
            C.object.active_material = D.materials[obj_idx + 1]

        cached_obj_scale[obj_idx] = None

    C.object.rotation_euler = (0, 0, 0)
    C.object.location = -np.array(C.object.bound_box).mean(0)
    bpy.ops.object.origin_set(type='ORIGIN_CURSOR', center='MEDIAN')

    if cached_obj_scale[obj_idx] is None:
        C.object.scale *= (.7 / max(C.object.dimensions))
        cached_obj_scale[obj_idx] = [C.object.scale[i] for i in range(3)]
    else:
        C.object.scale = cached_obj_scale[obj_idx]

    C.object.rotation_euler = (np.pi / 2, 0, -np.pi / 6)

    # Random scaling
    scale_factor = np.clip(np.random.randn() * random_scaling + .8, 0.1, 1.5)
    C.object.scale *= scale_factor

    # Slight warping of individual object scales
    warp_factor = np.random.randn(3)
    warp_factor -= warp_factor.mean()  # Keep total scale roughly consistent
    warp_factor = np.clip(warp_factor * random_warp + 1, .5, 1.5)
    for i in range(3):
        C.object.scale[i] *= warp_factor[i]

    # Random viewpoint shift
    viewpoint_shift = np.random.randn(3) * random_viewpoint
    C.object.rotation_euler += viewpoint_shift
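

# Hypothetical usage (assumption; object count, class choice and sample bookkeeping
# live elsewhere in the generation script):
# for obj_idx in range(num_objects):
#     class_idx = np.random.randint(len(util.n_models))
#     setup_object(obj_idx, class_idx, sample_idx)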