Example #1
    def worker(mix_k):
        local_hparams = JsonConfig(hparams_dir)
        local_hparams.Mixture.num_component = mix_k
        model = "GenMM-K{}" if local_hparams.Mixture.naive else "LatMM-K{}"
        local_hparams.Dir.log_root = os.path.join(local_hparams.Dir.log_root,
                                                  model.format(mix_k))
        this_device = next(device_iter)
        local_hparams.Device.glow[0] = this_device
        local_hparams.Device.data = this_device
        print("Dir: {} and device: {}".format(local_hparams.Dir.log_root,
                                              this_device))
        peeked = False
        if not peeked:
            tmp_dataloader = torch.utils.data.DataLoader(dataset_ins,
                                                         batch_size=64,
                                                         shuffle=True,
                                                         num_workers=int(2))
            img = next(iter(tmp_dataloader))[0]

            if not os.path.exists(local_hparams.Dir.log_root):
                os.makedirs(local_hparams.Dir.log_root)
            # peek the training data set
            vutils.save_image(
                img.add(0.5),
                os.path.join(local_hparams.Dir.log_root,
                             "img_under_evaluation.png"))
            peeked = True

        built = build(local_hparams, True)
        trainer = Trainer(**built, dataset=dataset_ins, hparams=local_hparams)
        trainer.train()
def test_glow():
    print("[Test]: Glow")
    from glow.config import JsonConfig
    glow = models.Glow(JsonConfig("hparams/celeba_test.json"))
    # img = cv2.imread("pictures/tsuki.jpeg")
    # img = cv2.resize(img, (32, 32))
    # img = (img / 255.0).astype(np.float32)
    # img = img[:, :, ::-1].transpose(2, 0, 1)
    # x = torch.Tensor([img]*8)

    x = torch.Tensor(np.random.rand(8, 1, 32, 32))
    print('x.size = ', x.size())

    batch_size = 8
    nb_digits = 10
    y = torch.LongTensor(batch_size).random_() % nb_digits
    print('y = ', y)
    print('y.view(-1,1) = ', y.view(-1, 1))
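    # build a one-hot encoding of y: scatter 1s into a zeroed (batch_size, nb_digits) matrix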
    y_onehot = torch.FloatTensor(batch_size, nb_digits)
    y_onehot.zero_()
    y_onehot.scatter_(1, y.view(-1, 1), 1)
    print('y_onehot:', y_onehot)

    z, det, y_logits = glow(x=x, y_onehot=y_onehot)
    print(z.size())
    print(det)

    print(models.Glow.loss_generative(det))
    print('y_logits =  ', y_logits)
    print(models.Glow.loss_class(y_logits, y))
    def __init__(self, test_class_index, is_mean, K, class_number):
        super(AssociateGlowGenerated, self).__init__()
        self.glow = models.Glow(JsonConfig("hparams/omni_all_bg.json"),
                                test_class_index=test_class_index,
                                is_mean=is_mean,
                                K=K,
                                y_classes=class_number)
        self.class_number = class_number
        self.eval_index = test_class_index
        self.criterion = nn.MSELoss()
        self.ones_tensor = torch.ones((1, 32, 32)).float().cuda()

        self.glow_generate = models.Glow(
            JsonConfig("hparams/omni_all_bg.json"),
            test_class_index=test_class_index,
            is_mean=is_mean,
            K=K,
            y_classes=class_number)
Example #4
    def worker(label):
        # load the subset data of the label
        local_hparams = JsonConfig(hparams_dir)

        local_hparams.Dir.log_root = os.path.join(local_hparams.Dir.log_root,
                                                  "classfier{}".format(label))
        dataset = load_obj(
            os.path.join(dataset_root,
                         "classSets/" + "subset{}".format(label)))
        if True:
            tmp_dataloader = torch.utils.data.DataLoader(dataset,
                                                         batch_size=64,
                                                         shuffle=True,
                                                         num_workers=int(2))
            img = next(iter(tmp_dataloader))

            if not os.path.exists(local_hparams.Dir.log_root):
                os.makedirs(local_hparams.Dir.log_root)

            vutils.save_image(
                img.data.add(0.5),
                os.path.join(local_hparams.Dir.log_root,
                             "img_under_evaluation.png"))

        # dump the json file for performance evaluation
        if not os.path.exists(
                os.path.join(local_hparams.Dir.log_root,
                             local_hparams.Data.dataset + ".json")):
            get_hparams = JsonConfig(hparams_dir)
            data_dir = get_hparams.Data.dataset_root
            get_hparams.Data.dataset_root = data_dir.replace("separate", "all")
            get_hparams.dump(dir_path=get_hparams.Dir.log_root,
                             json_name=get_hparams.Data.dataset + ".json")

        ### build model and train
        built = build(local_hparams, True)

        print(local_hparams.Dir.log_root)
        trainer = Trainer(**built, dataset=dataset, hparams=local_hparams)
        trainer.train()
def test_glow():
    print("[Test]: Glow")
    from glow.config import JsonConfig
    glow = models.Glow(JsonConfig("hparams_celeba.json"))
    img = cv2.imread("tsuki.jpeg")
    img = cv2.resize(img, (64, 64))
    img = (img / 255.0).astype(np.float32)
    img = img[:, :, ::-1].transpose(2, 0, 1)
    x = torch.Tensor([img] * 8)
    y_onehot = torch.zeros((8, 40))
    z, det, y_logits = glow(x=x, y_onehot=y_onehot)
    print(z.size())
    print(det)
    print(models.Glow.loss_generative(det))
def test_glow():
    print("[Test]: Glow")
    from glow.config import JsonConfig
    glow = models.Glow(JsonConfig("hparams/celeba_test.json"),
                       is_mean=True,
                       test_class_index=[1, 2],
                       K=4,
                       y_classes=10,
                       arc_loss=True)
    # img = cv2.imread("pictures/tsuki.jpeg")
    # img = cv2.resize(img, (32, 32))
    # img = (img / 255.0).astype(np.float32)
    # img = img[:, :, ::-1].transpose(2, 0, 1)
    # x = torch.Tensor([img]*8)
    # glow.set_z_add_random()
    glow.cuda()

    x = torch.Tensor(np.random.rand(12, 1, 32, 32))
    print('x.size = ', x.size())

    batch_size = 12
    nb_digits = 10
    y = torch.LongTensor(batch_size).random_() % nb_digits
    print('y = ', y)
    print('y.view(-1,1) = ', y.view(-1, 1))
    y_onehot = torch.FloatTensor(batch_size, nb_digits)
    y_onehot.zero_()
    y_onehot.scatter_(1, y.view(-1, 1), 1)
    print('y_onehot:', y_onehot)

    z, det, y_logits = glow(x=x, y_onehot=y_onehot)
    y_logits = glow.class_flow(z, y_onehot)
    print('z.size() =  ', z.size())
    print('det = ', det)
    print('y_logits = ', y_logits)

    print(models.Glow.loss_generative(det))
    # print('y_logits =  ',y_logits)
    print(models.Glow.loss_class(y_logits, y))
from glow.builder import build
from glow.trainer import Trainer
from glow.generator import Generator
from glow.config import JsonConfig
from torch.utils.data import DataLoader

if __name__ == "__main__":
    args = docopt(__doc__)
    hparams = args["<hparams>"]
    dataset = args["<dataset>"]
    assert dataset in motion.Datasets, (
        "`{}` is not supported, use `{}`".format(dataset,
                                                 motion.Datasets.keys()))
    assert os.path.exists(hparams), (
        "Failed to find hparams josn `{}`".format(hparams))
    hparams = JsonConfig(hparams)
    dataset = motion.Datasets[dataset]

    date = str(datetime.datetime.now())
    date = date[:date.rfind(":")].replace("-", "")\
                                 .replace(":", "")\
                                 .replace(" ", "_")
    log_dir = os.path.join(hparams.Dir.log_root, "log_" + date)
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)

    print("log_dir:" + str(log_dir))

    is_training = hparams.Infer.pre_trained == ""

    data = dataset(hparams, is_training)
Example #8
                step, key, accuracy))
            #value.append(accuracy.cpu().data.numpy())
            value.append(accuracy)
            data_loader = None

    accuracy_dict["step"] = step_batches
    return accuracy_dict


if __name__ == "__main__":
    args = docopt(__doc__)
    hparams_dir = args["<hparams>"]
    assert os.path.exists(hparams_dir), (
        "Failed to find hparams josn `{}`".format(hparams))

    hparams = JsonConfig(hparams_dir)
    hparams.Dir.log_root = os.path.dirname(hparams_dir)
    hparams.Dir.classifier_dir = os.path.join(hparams.Dir.log_root,
                                              "classfier{}/log")

    log_dir = hparams.Dir.log_root
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)

    accuracy_dict = testing(hparams)

    score_dir = os.path.join(hparams.Dir.log_root, "accuracy.pkl")
    with open(score_dir, "wb") as f:
        pickle.dump(accuracy_dict, f)

    value_pd = pd.DataFrame(accuracy_dict)
    hparams = args["<hparams>"]
    dataset_root = args["<dataset_root>"]
    z_dir = args["<z_dir>"]
    assert os.path.exists(dataset_root), (
        "Failed to find root dir `{}` of dataset.".format(dataset_root))
    assert os.path.exists(hparams), (
        "Failed to find hparams josn `{}`".format(hparams))
    if not os.path.exists(z_dir):
        print("Generate Z to {}".format(z_dir))
        os.makedirs(z_dir)
        generate_z = True
    else:
        print("Load Z from {}".format(z_dir))
        generate_z = False

    hparams = JsonConfig("hparams/celeba.json")
    dataset = vision.Datasets["celeba"]
    # set transform of dataset
    transform = transforms.Compose([
        transforms.CenterCrop(hparams.Data.center_crop),
        transforms.Resize(hparams.Data.resize),
        transforms.ToTensor()
    ])
    # build
    graph = build(hparams, False)["graph"]
    dataset = dataset(dataset_root, transform=transform)

    # get Z
    if not generate_z:
        # try to load
        try:
Example #10
import argparse

from glow.builder import build
from glow.config import JsonConfig
from glow.dataset import Speech2FaceDataset
from glow.trainer import Trainer

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("config", default="hparams/speech2face_gpu.json")
    parser.add_argument("--small", action="store_true")
    args = parser.parse_args()

    hparams = JsonConfig(args.config)

    # build graph and dataset
    built = build(hparams, True)

    train_files = hparams.Files.train
    validation_files = hparams.Files.validation

    if args.small:
        train_files = train_files[:2]
        validation_files = validation_files[:2]

    train_dataset = Speech2FaceDataset(
        train_files,
        data_dir=hparams.Dir.data,
        total_frames=hparams.Glow.image_shape[0],
        audio_feature_type=hparams.Data.audio_feature_type,
    )

# Save the result x as an image (one subplot per sample)
def show_as_images(x):
    assert len(x.shape) == 3, 'Batch x H x W !'
    batch_size = x.shape[0]
    fig = plt.figure()
    for i in range(batch_size):
        plt.subplot(batch_size, 1, i + 1)
        plt.imshow(x[i])
    plt.savefig(os.path.join(z_dir, 'x.png'))
    return
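
# A minimal usage sketch for show_as_images (not part of the original example):
# it assumes a NumPy array shaped Batch x H x W and a writable global z_dir, e.g.
#   sample = np.random.rand(4, 16, 8)
#   show_as_images(sample)  # saves the stacked subplots to z_dir/x.png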


# Main routine
hparams = JsonConfig("hparams/myo.json")
# build
graph = build(hparams, False)["graph"]

# Design a GUI for this program so that z values can be picked by hand and the
# corresponding sEMG linear envelope is computed automatically.
# Build the GUI-related pieces
import tkinter as tk

window = tk.Tk()
window.title('z => x')
window.geometry('1000x2000')

z_shape = (16, 8, 2)
z_dims = 256  # dimensionality of the latent z
z = np.zeros(z_dims)
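
# --- A hedged sketch (not from the original example) of the GUI described above.
# Assumptions: only the first few of the 256 z dimensions get a slider, and
# decode_z() is a hypothetical helper that runs the reverse pass of `graph`
# to turn a z sample back into sEMG linear-envelope images for show_as_images().
n_sliders = 8
sliders = []
for i in range(n_sliders):
    s = tk.Scale(window, from_=-3.0, to=3.0, resolution=0.1,
                 orient=tk.HORIZONTAL, length=400, label="z[{}]".format(i))
    s.pack()
    sliders.append(s)


def on_generate():
    # copy the manually chosen values into z; the remaining dims stay at zero
    for i, s in enumerate(sliders):
        z[i] = s.get()
    x = decode_z(z.reshape(z_shape))  # hypothetical reverse-flow call
    show_as_images(x)                 # expects a Batch x H x W array


tk.Button(window, text="z => x", command=on_generate).pack()
window.mainloop()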