Example 1
    def forward(self, input, out_chn, in_chn, in_h, in_w, f_h, f_w, conv_type):
        # Before launching the process, write the parameters to a file; the C program then reads them in.
        saveImage(input)
        print("Running 1_1_conv")
        if os.system("../excute/1_1_conv.out") == 0:
            # After the process finishes, read the results back from the output file.
            pass
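The snippet above bridges Python and a native convolution binary by writing the input to disk, shelling out, and reading the result back. A minimal, hedged sketch of that pattern using subprocess; the helper name and the params.bin / result.bin file names are assumptions, not part of the original project:

import subprocess

import numpy as np


def run_external_conv(image, binary="../excute/1_1_conv.out"):
    """Write the input, run the native binary, read the result back (hypothetical file names)."""
    # 1) Dump the input so the C program can read it; "params.bin" is an assumed name.
    np.asarray(image, dtype=np.float32).tofile("params.bin")
    # 2) Launch the external convolution binary and check its exit status.
    result = subprocess.run([binary], check=False)
    if result.returncode != 0:
        raise RuntimeError(f"{binary} exited with code {result.returncode}")
    # 3) Read whatever the C program wrote; "result.bin" is an assumed name.
    return np.fromfile("result.bin", dtype=np.float32)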
Example 2
def generator_sampler(opt):
    opt.batchSize = 64
    opt.folderSize = 600
    opt.overWrite = False
    opt.outf = g.default_repo_dir

    opt = addDataInfo(opt)
    netG = get_generator_model(opt)
    netG.load_state_dict(torch.load(get_generator_loc(opt)))
    netG.eval()

    opt.name = opt.outf + "samples/" + \
        opt.data + "/" + opt.model + str(opt.epoch)
    print_prop(opt)

    mkdir(opt.outf + "samples")
    mkdir(opt.outf + "samples/" + opt.data)
    if (os.path.exists(opt.name)) and (not opt.overWrite):
        if (os.path.exists(opt.name + "/mark")):  # indeed finished
            print("Sampling already finished before. Now pass.")
            saveFeature(opt.name, opt, opt.feature_model)
            return
        else:
            print("Partially finished. Now rerun. ")

    mkdir(opt.name)
    netG.cuda()

    noise = Variable(torch.FloatTensor(opt.batchSize, 100, 1, 1).cuda())

    def giveName(iter):  # 7 digit name.
        ans = str(iter)
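        # note: equivalent to str(iter).zfill(7)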
        return '0' * (7 - len(ans)) + ans

    iter = 0
    for subfolder in range(0, 1 + opt.sampleSize // opt.folderSize):
        mkdir(opt.name + "/" + str(subfolder))
        for i in range(0, 1 + opt.folderSize // opt.batchSize):
            noise.data.normal_(0, 1)
            fake = netG(noise)
            for j in range(0, len(fake.data)):
                saveImage(
                    fake.data[j], opt.name + "/" + str(subfolder) + "/" +
                    giveName(iter) + ".png")
                iter += 1
                if iter % opt.folderSize == 0 or iter >= opt.sampleSize:
                    break
            if iter % opt.folderSize == 0 or iter >= opt.sampleSize:
                break
        if iter >= opt.sampleSize:
            break

    if opt.dataset == 'mnist_s':
        print("Warning: subclass experiment.. Not saving features..")
    else:
        saveFeature(opt.name, opt, opt.feature_model)
    peek(opt.data, opt.model + str(opt.epoch))

    with open(opt.name + "/mark", "w") as f:
        f.write("")
Example 3
    def refresh_cookie(self):
        url = "http://www.shangxueba365.com/"
        try:
            rep = self.session.get(url, headers=self.base_headers, timeout=self.timeout)
            rep.raise_for_status()
            img_base64 = re.findall(
                r'<img class="verifyimg" alt="verify_img" src="data:image/bmp;base64,(.*?)"/>', rep.text)[0]
            img_bytes = base64.b64decode(img_base64)
            img_name = "verify.jpg"
            saveImage(img_bytes, img_name)
            showImage(img_name)
            time.sleep(1)
            verify_code = input("Enter the CAPTCHA code: ")
            removeImage(img_name)
            # JavaScript used by the site to encode the code:
            # var val = "";
            # for (var i = 0; i < str.length; i++) {
            #     if (val == "")
            #         val = str.charCodeAt(i).toString(16);
            #     else
            #         val += str.charCodeAt(i).toString(16);
            # }
            啥 = ""  # 啥 ("what") holds the encoded verification code
            for item in verify_code:
                啥 += str(ord(item) - 18)
            rep = self.session.get(url + "/?security_verify_img=" + 啥,
                                   headers=self.base_headers, timeout=self.timeout)
            cookie_dict = requests.utils.dict_from_cookiejar(self.session.cookies)
            with open("cookie.json", "w") as f:
                json.dump(cookie_dict, f)
        except Exception as e:
            raise RefreshException("failed to fetch cookie") from e
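The commented-out JavaScript block above concatenates the hexadecimal char codes of the typed CAPTCHA; a literal Python transcription is shown below purely as a reading aid (note the method itself sends `ord(c) - 18` instead, so this is not what the class actually submits):

verify_code = "abc123"  # example input; in refresh_cookie it comes from input()
token = "".join(format(ord(c), "x") for c in verify_code)
print(token)  # -> "616263313233"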
Example 4
def peek(dat, folder, force=False):
    g = Globals()
    outf = g.default_repo_dir

    mkdir(outf + "peek")
    mkdir(outf + "peek/" + dat)

    print("\nPeeking " + folder + " for " + dat)
    dir = outf + "samples/" + dat + "/" + folder
    print("dir", dir)

    if (not force) and (os.path.exists(outf + 'peek/%s/%s.png' %
                                       (dat, folder.replace("/", "_")))):
        print("Already peeked before. Now exit.")
        return

    dataset = dset.ImageFolder(root=dir,
                               transform=transforms.Compose([
                                   transforms.ToTensor(),
                                   transforms.Normalize((0.5, 0.5, 0.5),
                                                        (0.5, 0.5, 0.5)),
                               ]))
    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=96,
                                             shuffle=True,
                                             num_workers=2)

    for i, data in enumerate(dataloader, 0):
        img, _ = data
        saveImage(img,
                  outf + 'peek/%s/%s.png' % (dat, folder.replace("/", "_")),
                  nrow=12)
        break
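For reference, peek only needs a dataset name and a sample-folder name; a hypothetical call (the argument values below are assumptions for illustration) writes one 12-column preview grid and returns:

# Hypothetical values: samples for "mnist" produced by model "DCGAN" at epoch 10.
peek("mnist", "DCGAN10")              # writes <repo>/peek/mnist/DCGAN10.png once
peek("mnist", "DCGAN10", force=True)  # regenerates the preview even if it exists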
Example 5
def peek(dat, folder, force=False):
    g = Globals()
    outf = g.default_repo_dir

    mkdir(outf + "peek")
    mkdir(outf + "peek/" + dat)

    print("\nPeeking " + folder + " for " + dat)
    dir = outf + "samples/" + dat + "/" + folder
    print("dir", dir)

    if (not force) and (os.path.exists(outf + 'peek/%s/%s.png' % (dat, folder.replace("/", "_")))):
        print("Already peeked before. Now exit.")
        return

    dataset = dset.ImageFolder(root=dir,
                               transform=transforms.Compose([
                                   transforms.ToTensor(),
                                   transforms.Normalize(
                                       (0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
                               ]))
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=96,
                                             shuffle=True, num_workers=2)

    for i, data in enumerate(dataloader, 0):
        img, _ = data
        saveImage(img, outf + 'peek/%s/%s.png' %
                  (dat, folder.replace("/", "_")), nrow=12)
        break
Example 6
def generator_sampler(opt):
    opt.batchSize = 64
    opt.folderSize = 600
    opt.overWrite = False
    opt.outf = g.default_repo_dir

    opt = addDataInfo(opt)
    netG = get_generator_model(opt)
    netG.load_state_dict(torch.load(get_generator_loc(opt)))
    netG.eval()

    opt.name = opt.outf + "samples/" + \
        opt.data + "/" + opt.model + str(opt.epoch)
    print_prop(opt)

    mkdir(opt.outf + "samples")
    mkdir(opt.outf + "samples/" + opt.data)
    if (os.path.exists(opt.name)) and (not opt.overWrite):
        if (os.path.exists(opt.name + "/mark")):  # indeed finished
            print("Sampling already finished before. Now pass.")
            saveFeature(opt.name, opt, opt.feature_model)
            return
        else:
            print("Partially finished. Now rerun. ")

    mkdir(opt.name)
    netG.cuda()

    noise = Variable(torch.FloatTensor(opt.batchSize, 100, 1, 1).cuda())

    def giveName(iter):  # 7 digit name.
        ans = str(iter)
        return '0' * (7 - len(ans)) + ans

    iter = 0
    for subfolder in range(0, 1 + opt.sampleSize // opt.folderSize):
        mkdir(opt.name + "/" + str(subfolder))
        for i in range(0, 1 + opt.folderSize // opt.batchSize):
            noise.data.normal_(0, 1)
            fake = netG(noise)
            for j in range(0, len(fake.data)):
                saveImage(fake.data[j], opt.name + "/" +
                          str(subfolder) + "/" + giveName(iter) + ".png")
                iter += 1
                if iter % opt.folderSize == 0 or iter >= opt.sampleSize:
                    break
            if iter % opt.folderSize == 0 or iter >= opt.sampleSize:
                break
        if iter >= opt.sampleSize:
            break

    if opt.dataset == 'mnist_s':
        print("Warning: subclass experiment.. Not saving features..")
    else:
        saveFeature(opt.name, opt, opt.feature_model)
    peek(opt.data, opt.model + str(opt.epoch))

    with open(opt.name + "/mark", "w") as f:
        f.write("")
Example 7
    def train(self):

        ## Optim
        self.optimG = optim.Adam(self.netG.parameters(),
                                 lr=self.args.lr[0],
                                 betas=(self.args.beta1, self.args.beta2),
                                 weight_decay=5e-5)
        self.optimD = optim.Adam(self.netD.parameters(),
                                 lr=self.args.lr[1],
                                 betas=(self.args.beta1, self.args.beta2),
                                 weight_decay=5e-5)

        self.criterion = nn.BCELoss()

        ## Data
        self.realLabel = torch.ones((self.args.batchSize)).to(self.device)
        self.fakeLabel = torch.zeros((self.args.batchSize)).to(self.device)
        trainLoader = getDataloader(self.args)

        for epoch in range(self.args.numEpoch):
            for i, (latent, face32, face64, face128) in enumerate(trainLoader):
                latent = latent.to(self.device)
                face32, face64, face128 = face32.to(self.device), face64.to(
                    self.device), face128.to(self.device)

                fake32, fake64, fake128 = self.netG(latent)

                dLoss = self.trainD(
                    [fake32.detach(),
                     fake64.detach(),
                     fake128.detach()], [face32, face64, face128])

                if (dLoss[0] + dLoss[3] <=
                        1.3) and (dLoss[1] + dLoss[4] <=
                                  1.3) and (dLoss[2] + dLoss[5] <= 1.3):
                    gLoss = self.trainG([fake32, fake64, fake128])
                else:
                    gLoss = [0.0, 0.0, 0.0]

                if i % 1 == 0:
                    self.logger.log(
                        "[%3d/%3d]][%5d/%5d] : D32(%.3f = F%.3f + R%.3f) D64(%.3f = F%.3f + R%.3f)  D128(%.3f = F%.3f + R%.3f) G(%.3f, %.3f, %.3f)"
                        % (epoch, self.args.numEpoch, i, len(trainLoader),
                           dLoss[0] + dLoss[3], dLoss[0], dLoss[3], dLoss[1] +
                           dLoss[4], dLoss[1], dLoss[4], dLoss[2] + dLoss[5],
                           dLoss[2], dLoss[5], gLoss[0], gLoss[1], gLoss[2]))

                saveImage(self.args, epoch, i,
                          [face32, face64, face128, fake32, fake64, fake128],
                          20)

            if epoch % 10 == 0:
                torch.save(self.netG.state_dict(),
                           self.args.savePath + "G_%d.pth" % epoch)
                torch.save(self.netD.state_dict(),
                           self.args.savePath + "D_%d.pth" % epoch)
Example 8
    def postprocess(self, input1):
        sd = Shared()
        self.__logger.debug('postprocessing...')
        if not self.__isTrain:
            # path of test data file
            save_path = self.__output_path
            utils.saveImage(input1, save_path + 'infer.png')
            # utils.displayImage(input)

        sd.setFlag('nowExit', True)
        print('done')
Example 9
    def saveImages(self):
        """
        Save the generated images in the outputFolder.
        """
        for styleN, dic in self.genIm.items():
            for contentN, im in dic.items():
                filename = f'{self.getImageName(contentN)}_as_{self.getImageName(styleN)}.jpg'
                filename = os.path.join(self.outputFolder, filename)
                utils.saveImage(im,
                                filename,
                                original=contentN,  # assumes the content key; a bare `content` is undefined in this scope
                                keepColors=self.keepColors)
Example 10
def subclass_sampler(opt):
    assert(opt.data == 'mnist')
    opt.workers = 2
    opt.imageSize = 64
    opt.batchSize = 600
    opt.outf = g.default_repo_dir

    opt = addDataInfo(opt)
    print_prop(opt)

    saved = []
    for i in range(0, 10):
        saved.append([])
    opt.outTrue9 = opt.outf + "samples/" + opt.data + "9/true"
    if (os.path.exists(opt.outTrue9)):
        if (os.path.exists(opt.outTrue9 + "/mark")):  # indeed finished
            print("Already generated before. Now exit.")
            return
        else:
            print("Partially finished. Now rerun. ")

    dataset, dataloader = getDataSet(opt)

    for batch_idx, (data, target) in enumerate(dataloader):
        for d, t in zip(data, target):
            saved[t].append(d * 0.3081 + 0.1307)

    opt.data_pre = opt.data
    for i in range(0, 10):

        mkdir(opt.outf + "samples")
        mkdir(opt.outf + "samples/" + opt.data_pre + str(i))
        curFolder = opt.outf + "samples/" + opt.data_pre + str(i) + "/true/"
        mkdir(curFolder)

        def giveName(iter):  # 7 digit name.
            ans = str(iter)
            return '0' * (7 - len(ans)) + ans

        subfolder = -1
        for s in range(0, len(saved[i])):
            if s % 600 == 0:
                subfolder += 1
                mkdir(curFolder + str(subfolder))
            saveImage(saved[i][s] * 2 - 1, curFolder +
                      str(subfolder) + "/" + giveName(s) + ".png")

        peek(opt.data, 'true', True)
        torch.save(saved[i], curFolder + "dat.pth")

        with open(curFolder + "/mark", "w") as f:
            f.write("")
Example 11
def subclass_sampler(opt):
    assert (opt.data == 'mnist')
    opt.workers = 2
    opt.imageSize = 64
    opt.batchSize = 600
    opt.outf = g.default_repo_dir

    opt = addDataInfo(opt)
    print_prop(opt)

    saved = []
    for i in range(0, 10):
        saved.append([])
    opt.outTrue9 = opt.outf + "samples/" + opt.data + "9/true"
    if (os.path.exists(opt.outTrue9)):
        if (os.path.exists(opt.outTrue9 + "/mark")):  # indeed finished
            print("Already generated before. Now exit.")
            return
        else:
            print("Partially finished. Now rerun. ")

    dataset, dataloader = getDataSet(opt)

    for batch_idx, (data, target) in enumerate(dataloader):
        for d, t in zip(data, target):
            saved[t].append(d * 0.3081 + 0.1307)

    opt.data_pre = opt.data
    for i in range(0, 10):

        mkdir(opt.outf + "samples")
        mkdir(opt.outf + "samples/" + opt.data_pre + str(i))
        curFolder = opt.outf + "samples/" + opt.data_pre + str(i) + "/true/"
        mkdir(curFolder)

        def giveName(iter):  # 7 digit name.
            ans = str(iter)
            return '0' * (7 - len(ans)) + ans

        subfolder = -1
        for s in range(0, len(saved[i])):
            if s % 600 == 0:
                subfolder += 1
                mkdir(curFolder + str(subfolder))
            saveImage(saved[i][s] * 2 - 1,
                      curFolder + str(subfolder) + "/" + giveName(s) + ".png")

        peek(opt.data, 'true', True)
        torch.save(saved[i], curFolder + "dat.pth")

        with open(curFolder + "/mark", "w") as f:
            f.write("")
Example 12
def saveSceneAsMaps(path, scene):
    path = path[:-1]
    file_name = os.path.basename(path)

    edgeMap, corners = utils.genLayoutEdgeMap(scene, pm.layoutMapSize)
    # utils.saveImage(edgeMap, path + '/label_edge_vp.png')
    utils.saveImage(edgeMap, os.path.join(path, "{}_EM.jpg".format(file_name)))

    corner_file = os.path.join(path, "{}.txt".format(file_name))
    with open(corner_file, 'w') as f:
        print('Write {} corners to file at: {}'.format(
            len(corners), corner_file))
        for corner in corners.keys():
            f.write('{} {}\n'.format(corner[0], corner[1]))

    oMap = utils.genLayoutOMap(scene, pm.layoutMapSize)
    utils.saveImage(oMap, path + '/label_omap.png')

    normalMap = utils.genLayoutNormalMap(scene, pm.layoutMapSize)
    utils.saveImage(normalMap, path + '/label_normal.png')

    depthMap = utils.genLayoutDepthMap(scene, pm.layoutMapSize)
    utils.saveDepth(depthMap, path + '/label_depth.png')

    obj2dMap = utils.genLayoutObj2dMap(scene, pm.layoutMapSize)
    utils.saveImage(obj2dMap, path + '/label_object2d.png')
Example 13
def noise_sampler(opt):

    opt.batchSize = 64
    opt.folderSize = 600
    opt.overWrite = False
    opt.outf = g.default_repo_dir

    opt = addDataInfo(opt)

    opt.name = opt.outf + "samples/noise/true"
    print_prop(opt)

    mkdir(opt.outf + "samples")
    mkdir(opt.outf + "samples/" + opt.data)
    if (os.path.exists(opt.name)) and (not opt.overWrite):
        if (os.path.exists(opt.name + "/mark")):  # indeed finished
            print("Already generated before. Now exit.")
            return
        else:
            print("Partially finished. Now rerun. ")
    mkdir(opt.name)

    noise = Variable(torch.FloatTensor(opt.batchSize, 3, 64, 64).cuda())

    def giveName(iter):  # 7 digit name.
        ans = str(iter)
        return '0' * (7 - len(ans)) + ans

    iter = 0
    for subfolder in range(0, 1 + opt.sampleSize // opt.folderSize):
        mkdir(opt.name + "/" + str(subfolder))
        for i in range(0, 1 + opt.folderSize // opt.batchSize):
            noise.data.normal_(0, 1)
            for j in range(0, noise.data.size(0)):
                saveImage(
                    noise.data[j], opt.name + "/" + str(subfolder) + "/" +
                    giveName(iter) + ".png")
                iter += 1
                if iter % opt.folderSize == 0:
                    break
            if iter % opt.folderSize == 0:
                break
        if iter >= opt.sampleSize:
            break
    saveFeature(opt.name, opt)
    peek(opt.data, opt.model)

    with open(opt.name + "/mark", "w") as f:
        f.write("")
Example 14
def stylize(args):
    device = torch.device("cuda" if args.cuda else "cpu")
    content_image = loadImage(args.content_image, scale=args.content_scale)
    content_transform = transforms.Compose([transforms.ToTensor(), transforms.Lambda(lambda x: x.mul(255))])
    content_image = content_transform(content_image)
    content_image = content_image.unsqueeze(0).to(device)

    with torch.no_grad():
        style_model = transformNet()
        state_dict = torch.load(args.model)
        style_model.load_state_dict(state_dict)
        style_model.to(device)
        generate = style_model(content_image).cpu()
    saveImage(args.generate_image, generate[0])
    print("generate image saved as", args.generate_image)
Example 15
def noise_sampler(opt):

    opt.batchSize = 64
    opt.folderSize = 600
    opt.overWrite = False
    opt.outf = g.default_repo_dir

    opt = addDataInfo(opt)

    opt.name = opt.outf + "samples/noise/true"
    print_prop(opt)

    mkdir(opt.outf + "samples")
    mkdir(opt.outf + "samples/" + opt.data)
    if (os.path.exists(opt.name)) and (not opt.overWrite):
        if (os.path.exists(opt.name + "/mark")):  # indeed finished
            print("Already generated before. Now exit.")
            return
        else:
            print("Partially finished. Now rerun. ")
    mkdir(opt.name)

    noise = Variable(torch.FloatTensor(opt.batchSize, 3, 64, 64).cuda())

    def giveName(iter):  # 7 digit name.
        ans = str(iter)
        return '0' * (7 - len(ans)) + ans

    iter = 0
    for subfolder in range(0, 1 + opt.sampleSize // opt.folderSize):
        mkdir(opt.name + "/" + str(subfolder))
        for i in range(0, 1 + opt.folderSize // opt.batchSize):
            noise.data.normal_(0, 1)
            for j in range(0, noise.data.size(0)):
                saveImage(noise.data[j], opt.name + "/" +
                          str(subfolder) + "/" + giveName(iter) + ".png")
                iter += 1
                if iter % opt.folderSize == 0:
                    break
            if iter % opt.folderSize == 0:
                break
        if iter >= opt.sampleSize:
            break
    saveFeature(opt.name, opt)
    peek(opt.data, opt.model)

    with open(opt.name + "/mark", "w") as f:
        f.write("")
Example 16
def saveSceneAsMaps(path, scene):

    edgeMap = utils.genLayoutEdgeMap(scene, pm.layoutMapSize)
    utils.saveImage(edgeMap, path + '/label_edge_vp.png')

    oMap = utils.genLayoutOMap(scene, pm.layoutMapSize)
    utils.saveImage(oMap, path + '/label_omap.png')

    normalMap = utils.genLayoutNormalMap(scene, pm.layoutMapSize)
    utils.saveImage(normalMap, path + '/label_normal.png')

    depthMap = utils.genLayoutDepthMap(scene, pm.layoutMapSize)
    utils.saveDepth(depthMap, path + '/label_depth.png')

    obj2dMap = utils.genLayoutObj2dMap(scene, pm.layoutMapSize)
    utils.saveImage(obj2dMap, path + '/label_object2d.png')
Example 17
            def closure():
                optimizer.zero_grad()

                model.forward(inputImage)

                closs = 0.
                sloss = 0.

                for cl in self.contentsLossModules:
                    closs += cl.getLoss()

                for sl in self.stylesLossModules:
                    sloss += sl.getLoss()

                loss = closs + sloss

                loss.backward()

                n[0] += 1

                if self.logLoss and n[0] % 50 == 49:
                    print(
                        f"Iteration {n[0]+1:5d}/{self.nLoops}; Style loss: {sloss:9.4f}; Contents loss: {closs:9.4f}; Total loss: {loss:9.4f}"
                    )

                if self.saveSnapshotEvery > 0 and n[
                        0] % self.saveSnapshotEvery == self.saveSnapshotEvery - 1:
                    filename = f'{contentNs}_as_{styleNs}_{n[0]+1}.png'
                    filename = os.path.join(self.outputFolder, filename)
                    output = inputImage.clone().detach().squeeze()
                    utils.saveImage(self.trans(output),
                                    filename,
                                    original=contentN,
                                    keepColors=self.keepColors)

                return loss
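A closure of this shape is what torch.optim.LBFGS expects: optimizer.step(closure) may invoke it several times per optimization step to re-evaluate the loss and gradients. A self-contained sketch with toy tensors standing in for the style-transfer model above:

import torch

x = torch.randn(1, 3, 8, 8, requires_grad=True)  # stands in for inputImage
target = torch.zeros_like(x)
optimizer = torch.optim.LBFGS([x], max_iter=20)

def closure():
    optimizer.zero_grad()
    loss = ((x - target) ** 2).mean()  # stands in for content + style losses
    loss.backward()
    return loss

optimizer.step(closure)  # LBFGS calls closure repeatedly inside step()
print(float(((x - target) ** 2).mean()))  # loss after one LBFGS step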
Example 18
def saveSceneAsMaps(path, scene, size):

    edgeMap = utils.genLayoutEdgeMap(scene, size)
    utils.saveImage(edgeMap, path + '/edge.png')

    oMap = utils.genLayoutOMap(scene, size)
    utils.saveImage(oMap, path + '/omap.png')

    normalMap = utils.genLayoutNormalMap(scene, size)
    utils.saveImage(normalMap, path + '/normal.png')

    depthMap = utils.genLayoutDepthMap(scene, size)
    utils.saveDepth(depthMap, path + '/depth.png')
Example 19
    def img(self, content):
        txt = ''
        if 'se-image' in str(content) or 'se_image' in str(content):
            for sub_content in content.select('img'):
                url = sub_content['data-lazy-src']
                if self.markdown_mdoe:
                    txt += '![' + './img/' + str(self.counter) + '.png' + ']('
                    txt += './img/' + str(self.counter) + '.png' + ')'
                    txt += '\n'
                else:
                    txt += '[' + str(self.counter) + ']'
                    txt += url
                    txt += '\n'

                if not utils.saveImage(
                        url, self.folder_path + '/img/' + str(self.counter) +
                        '.png'):
                    print('\t' + str(content) + ' is being saved.')
                else:
                    self.counter += 1
            txt += self.endline
            return txt
        return None
Example 20
def DCGAN_main(opt):
    g = Globals()

    opt.workers = 2
    opt.batchSize = 64
    opt.imageSize = 64
    opt.nz = 100
    opt.ngf = 64
    opt.ndf = 64
    opt.niter = 50
    opt.lr = 0.0002
    opt.beta1 = 0.5
    opt.cuda = True
    opt.ngpu = 1
    opt.netG = ''
    opt.netD = ''
    opt.outf = g.default_model_dir + "DCGAN/"
    opt.manualSeed = None

    opt = addDataInfo(opt)
    opt.outf = opt.outf + opt.data + "/"
    print_prop(opt)

    try:
        os.makedirs(opt.outf)
    except OSError:
        pass
    if os.path.exists(opt.outf + "/mark"):
        print("Already generated before. Now exit.")
        return

    if opt.manualSeed is None:
        opt.manualSeed = random.randint(1, 10000)
    print("Random Seed: ", opt.manualSeed)
    random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)
    if opt.cuda:
        torch.cuda.manual_seed_all(opt.manualSeed)

    cudnn.benchmark = True

    if torch.cuda.is_available() and not opt.cuda:
        print("WARNING: You have a CUDA device, so you should probably run with --cuda")

    dataset, dataloader = getDataSet(opt)

    ngpu = int(opt.ngpu)
    nz = int(opt.nz)
    ngf = int(opt.ngf)
    ndf = int(opt.ndf)
    nc = 1 if opt.data.startswith("mnist") else 3

    # custom weights initialization called on netG and netD
    def weights_init(m):
        classname = m.__class__.__name__
        if classname.find('Conv') != -1:
            m.weight.data.normal_(0.0, 0.02)
        elif classname.find('BatchNorm') != -1:
            m.weight.data.normal_(1.0, 0.02)
            m.bias.data.fill_(0)

    netG = DCGAN_G(nz, nc, ngf)
    netG.apply(weights_init)
    if opt.netG != '':
        netG.load_state_dict(torch.load(opt.netG))
    print(netG)

    netD = DCGAN_D(ngpu)
    netD.apply(weights_init)
    if opt.netD != '':
        netD.load_state_dict(torch.load(opt.netD))
    print(netD)

    criterion = nn.BCELoss()

    input = torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize)
    noise = torch.FloatTensor(opt.batchSize, nz, 1, 1)
    fixed_noise = torch.FloatTensor(opt.batchSize, nz, 1, 1).normal_(0, 1)
    label = torch.FloatTensor(opt.batchSize)
    real_label = 1
    fake_label = 0

    if opt.cuda:
        netD.cuda()
        netG.cuda()
        criterion.cuda()
        input, label = input.cuda(), label.cuda()
        noise, fixed_noise = noise.cuda(), fixed_noise.cuda()

    input = Variable(input)
    label = Variable(label)
    noise = Variable(noise)
    fixed_noise = Variable(fixed_noise)

    # setup optimizer
    optimizerD = optim.Adam(netD.parameters(), lr=opt.lr,
                            betas=(opt.beta1, 0.999))
    optimizerG = optim.Adam(netG.parameters(), lr=opt.lr,
                            betas=(opt.beta1, 0.999))

    for epoch in range(opt.niter):
        for i, data in enumerate(dataloader, 0):
            ############################
            # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
            ###########################
            # train with real
            netD.zero_grad()
            real_cpu, _ = data
            batch_size = real_cpu.size(0)
            input.data.resize_(real_cpu.size()).copy_(real_cpu)
            label.data.resize_(batch_size).fill_(real_label)

            output = netD(input)
            errD_real = criterion(output, label)
            errD_real.backward()
            D_x = output.data.mean()

            # train with fake
            noise.data.resize_(batch_size, nz, 1, 1)
            noise.data.normal_(0, 1)
            fake = netG(noise)
            label.data.fill_(fake_label)
            output = netD(fake.detach())
            errD_fake = criterion(output, label)
            errD_fake.backward()
            D_G_z1 = output.data.mean()
            errD = errD_real + errD_fake
            optimizerD.step()

            ############################
            # (2) Update G network: maximize log(D(G(z)))
            ###########################
            netG.zero_grad()
            # fake labels are real for generator cost
            label.data.fill_(real_label)
            output = netD(fake)
            errG = criterion(output, label)
            errG.backward()
            D_G_z2 = output.data.mean()
            optimizerG.step()

            print('[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)): %.4f / %.4f'
                  % (epoch, opt.niter, i, len(dataloader),
                     errD.data[0], errG.data[0], D_x, D_G_z1, D_G_z2))
            if i % 100 == 0:
                saveImage(real_cpu, '%s/real_samples.png' % opt.outf)
                fake = netG(fixed_noise)
                saveImage(fake.data, '%s/fake_samples_epoch_%03d.png' %
                          (opt.outf, epoch))

        # do checkpointing
        torch.save(netG.state_dict(), '%s/netG_epoch_%d.pth' %
                   (opt.outf, epoch))
        torch.save(netD.state_dict(), '%s/netD_epoch_%d.pth' %
                   (opt.outf, epoch))
    with open(opt.outf + "/mark", "w") as f:
        f.write("")
Example 21
    def __init__(self, thermal_image=None, filename=None):
        """Initializer for the main window."""
        self.exthandler = WindowHandler()
        if thermal_image is not None:
            mat = thermal_image.thermal_np.astype(np.float32)

            if mat.shape != (512, 640):
                y0, x0 = mat.shape
                mat = zoom(mat, [512 / y0, 640 / x0])

            self.mat = mat
            self.mat_orig = mat.copy()
            self.mat_emm = mat.copy()
            self.raw = thermal_image.raw_sensor_np
            self.meta = thermal_image.meta
            self.overlays = pygame.Surface((640, 512), pygame.SRCALPHA)
        else:
            with open(filename, "rb") as f:
                data = pickle.load(f)
            self.mat = data.mat
            self.mat_orig = data.mat_orig
            self.mat_emm = data.mat_emm
            self.raw = data.raw
            self.meta = data.meta
            self.overlays = pygame.image.fromstring(data.overlays, (640, 512),
                                                    "RGBA")

            for entry in data.tableEntries:
                self.exthandler.addToTable(entry)
            self.exthandler.loadGraph(data.plots)
            self.exthandler.addRects(data.rects)

        self.colorMap = "jet"
        self.lineNum = 0
        self.boxNum = 0
        self.spotNum = 0
        self.areaMode = "poly"
        self.selectionComplete = False
        self.work("colorMap", self.colorMap)

        self.mode = "main"
        # Dictionary of pages. Each page is a manager.
        self.managers = {}
        self.managers["main"] = Manager(buttons=[
            ((15, 15), (215, 45), "Spot marking",
             lambda: self.changeMode("spot")),
            (
                (15, 75),
                (215, 45),
                "Line measurement",
                lambda: self.changeMode("line"),
            ),
            ((15, 135), (215, 45), "Area marking",
             lambda: self.changeMode("area")),
            ((15, 195), (215, 45), "ROI scaling",
             lambda: self.changeMode("scale")),
            (
                (15, 255),
                (215, 45),
                "Change colorMap",
                lambda: self.changeMode("colorMap"),
            ),
            (
                (15, 315),
                (215, 45),
                "Emissivity scaling",
                lambda: self.changeMode("emissivity"),
            ),
            (
                (15, 470),
                (215, 45),
                "Reset modifications",
                lambda: self.work("reset"),
            ),
            ((15, 530), (100, 45), "Open image", lambda: self.work("open")),
            ((130, 530), (100, 45), "Save image", lambda: saveImage(self)),
        ])
        self.managers["spot"] = Manager(
            buttons=[((15, 530), (215, 45), "Back",
                      lambda: self.changeMode("main"))],
            textbox=((15, 15), (215, -1), "Click to mark spots"),
        )
        self.managers["line"] = Manager(
            buttons=[
                (
                    (15, 410),
                    (215, 45),
                    "Continue",
                    lambda: self.work("line")
                    if len(self.linePoints) == 2 else None,
                ),
                ((15, 470), (215, 45), "Reset",
                 lambda: self.changeMode("line")),
                ((15, 530), (215, 45), "Back",
                 lambda: self.changeMode("main")),
            ],
            textbox=(
                (15, 15),
                (215, -1),
                "Click to mark the end points of the line. Click continue to get plot and reset to remove the line",
            ),
        )
        self.managers["area"] = Manager(
            buttons=[
                ((15, 470), (215, 45), "Continue", lambda: self.work("area")),
                ((15, 530), (215, 45), "Back",
                 lambda: self.changeMode("main")),
            ],
            textbox=(
                (15, 15),
                (215, -1),
                "Click and drag to draw selection. Select continue to mark",
            ),
        )
        self.managers["scale"] = Manager(
            buttons=[
                (
                    (15, 270),
                    (215, 45),
                    "Switch to rect mode",
                    lambda: self.work("scale", "switchMode"),
                ),
                (
                    (15, 350),
                    (215, 45),
                    "Continue",
                    lambda: self.work("scale", "scale")
                    if self.selectionComplete else None,
                ),
                (
                    (15, 410),
                    (215, 45),
                    "Reset scaling",
                    lambda: self.work("scale", "reset"),
                ),
                (
                    (15, 470),
                    (215, 45),
                    "Reset selection",
                    lambda: self.changeMode("scale"),
                ),
                ((15, 530), (215, 45), "Back",
                 lambda: self.changeMode("main")),
            ],
            textbox=(
                (15, 15),
                (215, -1),
                "Click to mark vertices. Press Ctrl and click to close the selection",
            ),
        )
        self.managers["colorMap"] = Manager(buttons=[
            ((15, 15), (215, 45), "Jet", lambda: self.work("colorMap", "jet")),
            ((15, 75), (215, 45), "Hot", lambda: self.work("colorMap", "hot")),
            ((15, 135), (215, 45), "Cool",
             lambda: self.work("colorMap", "cool")),
            ((15, 195), (215, 45), "Gray",
             lambda: self.work("colorMap", "gray")),
            (
                (15, 255),
                (215, 45),
                "Inferno",
                lambda: self.work("colorMap", "inferno"),
            ),
            (
                (15, 315),
                (215, 45),
                "Copper",
                lambda: self.work("colorMap", "copper"),
            ),
            (
                (15, 375),
                (215, 45),
                "Winter",
                lambda: self.work("colorMap", "winter"),
            ),
            ((15, 530), (215, 45), "Back", lambda: self.changeMode("main")),
        ])
        self.managers["emissivity"] = Manager(
            buttons=[
                (
                    (15, 410),
                    (215, 45),
                    "Continue",
                    lambda: self.work("emissivity", "update")
                    if self.selectionComplete else None,
                ),
                (
                    (15, 470),
                    (215, 45),
                    "Reset",
                    lambda: self.work("emissivity", "reset"),
                ),
                ((15, 530), (215, 45), "Back",
                 lambda: self.changeMode("main")),
            ],
            textbox=(
                (15, 15),
                (215, -1),
                "Select region, enter values and press continue. Click to mark vertices."
                "Press Ctrl and click to close the selection",
            ),
            fields=[
                ((15, 165), (215, 45), "Emissivity:"),
                ((15, 240), (215, 45), "Reflected Temp.:"),
                ((15, 315), (215, 45), "Atmospheric Temp.:"),
            ],
        )

        self.linePoints = []

        self.cursor_rect = self.cursors[0].get_rect()
        self.background = pygame.Surface(WINDOW_SIZE)
        self.background.fill((0, 0, 0))
Example 22
padding = 10


def map_to_screen(v):
    return [(v[0] + 1) * (width - padding) / 2,
            (v[1] + 1) * (height - padding) / 2, v[2]]


if __name__ == "__main__":
    timestart = time.perf_counter()
    image = utils.createImage(width, height)
    m = Model("obj/african_head.obj")
    timeend = time.perf_counter()
    print("Read model :: ", (timeend - timestart), "s")

    timestart = time.perf_counter()
    for i, f in enumerate(m.faces):
        v = np.array([map_to_screen(m.vertices[f[i][0]]) for i in range(3)])
        # triangle_line_sweep(v, image, utils.RANDOM())
        triangle(v, image, shader=lambda bc: utils.RANDOM())
    timeend = time.perf_counter()
    print("Render image :: ", (timeend - timestart), "s")

    timestart = time.perf_counter()
    utils.saveImage("out/4_mesh_flat_fill.jpg", image)
    timeend = time.perf_counter()
    print("Save to file :: ", (timeend - timestart), "s")

    window = MainWindow(width, height)
    window.showImage(image)
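For orientation, map_to_screen converts a vertex from normalized device coordinates in [-1, 1] to pixel coordinates inside a padding-pixel margin, leaving z untouched; a quick sanity check with assumed canvas dimensions:

width, height, padding = 800, 800, 10  # assumed values for illustration

def map_to_screen(v):
    return [(v[0] + 1) * (width - padding) / 2,
            (v[1] + 1) * (height - padding) / 2, v[2]]

print(map_to_screen([-1.0, -1.0, 0.5]))  # [0.0, 0.0, 0.5]     -> top-left corner
print(map_to_screen([1.0, 1.0, 0.5]))    # [790.0, 790.0, 0.5] -> width - padding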
Example 23
def getDat(opt, dataList, outf, mixType="pix", singleFolder=False):
    size = []
    remain = opt.mixSize
    for entry in dataList:
        size.append(int(entry.fraction * opt.mixSize))  # get the correct size
        remain -= size[-1]  # update remain
    size[-1] += remain  # add the rest to the last bucket.
    assert (sum(size) == opt.mixSize)  # sizes must add up to opt.mixSize
    dat = torch.FloatTensor()
    tot = 0
    if singleFolder and os.path.exists(outf + "/mark"):
        print("Already generated before. Now exit.")
        dat = torch.load(outf + "/img.pth")
        return dat
    shutil.rmtree(outf, ignore_errors=True)
    mkdir(outf)

    if mixType == "pix":  # mix of images..

        def giveName(iter):  # 7 digit name.
            ans = str(iter)
            return '0' * (7 - len(ans)) + ans

        subfolder = -1
        subfolderSize = 600
        for entry, s in zip(dataList, size):  # should sample it one by one

            print(entry.data, entry.folder, s)
            opt.dir = g.default_repo_dir + "samples/" + entry.data + "/" + entry.folder

            opt.manualSeed = random.randint(1, 10000)  # fix seed
            random.seed(opt.manualSeed)
            torch.manual_seed(opt.manualSeed)

            # can take some transform defined by preprocess
            dataset = dset.ImageFolder(root=opt.dir, transform=entry.transform)
            dataloader = torch.utils.data.DataLoader(dataset,
                                                     batch_size=96,
                                                     shuffle=True,
                                                     num_workers=2)

            count = 0
            # the dataset should contain at least as many images as we need
            assert (len(dataset) >= s)
            avg_img = get_avg(dataloader)

            for i, data in enumerate(dataloader, 0):
                img, _ = data
                for candidate in img:  # add images one by one
                    # The line below handles the dup > 1 case: use the average image instead
                    image = candidate if entry.dup == 1 else avg_img
                    for kth in range(0, entry.dup):  # duplicated images...
                        if tot == 0:
                            dat.resize_(
                                opt.mixSize,
                                image.size(0) * image.size(1) * image.size(2))
                        if tot % subfolderSize == 0:
                            subfolder += 1
                            mkdir(outf + "/" + str(subfolder))
                        saveImage(
                            image, outf + "/" + str(subfolder) + "/" +
                            giveName(tot) + ".png")
                        dat[tot].fill_(0)
                        dat[tot] += image.resize_(image.nelement()) * 0.5 + 0.5
                        tot += 1
                        count += 1
                        if count == s:  # done copying
                            break
                    if count == s:  # done copying
                        break
                if count == s:  # done copying
                    break
        peek("Mix", os.path.basename(os.path.normpath(outf)), force=True)

        if singleFolder:
            torch.save(dat, outf + "/img.pth")
            torch.save([], outf + "/mark")

        return dat
    else:
        last = 0
        for entry, s in zip(dataList, size):  # should sample it one by one
            if entry.imageMode == 0:
                # no transformation, read features directly
                featureFile = g.default_feature_dir + entry.data + \
                    "/" + entry.folder + "_" + mixType + ".pth"

                featureM = torch.load(featureFile)

            else:
                # need transformation, no test
                opt.dir = g.default_repo_dir + "samples/" + entry.data + "/" + entry.folder
                dataset = dset.ImageFolder(root=opt.dir,
                                           transform=entry.transform)
                dataloader = torch.utils.data.DataLoader(dataset,
                                                         batch_size=96,
                                                         shuffle=True,
                                                         num_workers=2)

                resnet = getattr(models, 'resnet34')(pretrained=True)
                print('Using resnet34 with pretrained weights.')
                resnet.cuda().eval()
                resnet_feature = nn.Sequential(resnet.conv1, resnet.bn1,
                                               resnet.relu, resnet.maxpool,
                                               resnet.layer1, resnet.layer2,
                                               resnet.layer3, resnet.layer4)
                feature_conv, feature_smax, feature_class = [], [], []
                for img, _ in tqdm(dataloader):
                    input = Variable(img.cuda(), volatile=True)
                    fconv = resnet_feature(input)
                    fconv = fconv.mean(3).mean(2).squeeze()
                    flogit = resnet.fc(fconv)
                    fsmax = F.softmax(flogit)
                    feature_conv.append(fconv.data.cpu())
                    feature_class.append(flogit.data.cpu())
                    feature_smax.append(fsmax.data.cpu())
                feature_conv = torch.cat(feature_conv, 0)
                feature_class = torch.cat(feature_class, 0)
                feature_smax = torch.cat(feature_smax, 0)

                if mixType.find('conv') >= 0:
                    featureM = feature_conv
                elif mixType.find('smax') >= 0:
                    featureM = feature_smax
                elif mixType.find('class') >= 0:
                    featureM = feature_class
                else:
                    raise NotImplementedError

            randP = torch.randperm(len(featureM))  # random permutation
            if last == 0:
                dat.resize_(opt.mixSize, featureM.size(1))
            dat[last:last + s].copy_(featureM.index_select(0, randP[:s]))
            last += s

        torch.save(dat, outf + "/feature_" + mixType)
        return dat
Example 24
def getDat(opt, dataList, outf, mixType="pix", singleFolder=False):
    size = []
    remain = opt.mixSize
    for entry in dataList:
        size.append(int(entry.fraction * opt.mixSize))  # get the correct size
        remain -= size[-1]  # update remain
    size[-1] += remain  # add the rest to the last bucket.
    assert(sum(size) == opt.mixSize)  # sizes must add up to opt.mixSize
    dat = torch.FloatTensor()
    tot = 0
    if singleFolder and os.path.exists(outf + "/mark"):
        print("Already generated before. Now exit.")
        dat = torch.load(outf + "/img.pth")
        return dat
    shutil.rmtree(outf, ignore_errors=True)
    mkdir(outf)

    if mixType == "pix":  # mix of images..
        def giveName(iter):  # 7 digit name.
            ans = str(iter)
            return '0' * (7 - len(ans)) + ans

        subfolder = -1
        subfolderSize = 600
        for entry, s in zip(dataList, size):  # should sample it one by one

            print(entry.data, entry.folder, s)
            opt.dir = g.default_repo_dir + "samples/" + entry.data + "/" + entry.folder

            opt.manualSeed = random.randint(1, 10000)  # fix seed
            random.seed(opt.manualSeed)
            torch.manual_seed(opt.manualSeed)

            # can take some transform defined by preprocess
            dataset = dset.ImageFolder(root=opt.dir, transform=entry.transform)
            dataloader = torch.utils.data.DataLoader(
                dataset, batch_size=96, shuffle=True, num_workers=2)

            count = 0
            # the dataset should contain at least as many images as we need
            assert(len(dataset) >= s)
            avg_img = get_avg(dataloader)

            for i, data in enumerate(dataloader, 0):
                img, _ = data
                for candidate in img:  # add images one by one
                    # The line below handles the dup > 1 case: use the average image instead
                    image = candidate if entry.dup == 1 else avg_img
                    for kth in range(0, entry.dup):  # duplicated images...
                        if tot == 0:
                            dat.resize_(opt.mixSize, image.size(
                                0) * image.size(1) * image.size(2))
                        if tot % subfolderSize == 0:
                            subfolder += 1
                            mkdir(outf + "/" + str(subfolder))
                        saveImage(image, outf + "/" + str(subfolder) +
                                  "/" + giveName(tot) + ".png")
                        dat[tot].fill_(0)
                        dat[tot] += image.resize_(image.nelement()) * 0.5 + 0.5
                        tot += 1
                        count += 1
                        if count == s:  # done copying
                            break
                    if count == s:  # done copying
                        break
                if count == s:  # done copying
                    break
        peek("Mix", os.path.basename(os.path.normpath(outf)), force=True)

        if singleFolder:
            torch.save(dat, outf + "/img.pth")
            torch.save([], outf + "/mark")

        return dat
    else:
        last = 0
        for entry, s in zip(dataList, size):  # should sample it one by one
            if entry.imageMode == 0:
                # no transformation, read features directly
                featureFile = g.default_feature_dir + entry.data + \
                    "/" + entry.folder + "_" + mixType + ".pth"

                featureM = torch.load(featureFile)

            else:
                # need transformation, no test
                opt.dir = g.default_repo_dir + "samples/" + entry.data + "/" + entry.folder
                dataset = dset.ImageFolder(
                    root=opt.dir, transform=entry.transform)
                dataloader = torch.utils.data.DataLoader(
                    dataset, batch_size=96, shuffle=True, num_workers=2)

                resnet = getattr(models, 'resnet34')(pretrained=True)
                print('Using resnet34 with pretrained weights.')
                resnet.cuda().eval()
                resnet_feature = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu,
                                               resnet.maxpool, resnet.layer1,
                                               resnet.layer2, resnet.layer3, resnet.layer4)
                feature_conv, feature_smax, feature_class = [], [], []
                for img, _ in tqdm(dataloader):
                    input = Variable(img.cuda(), volatile=True)
                    fconv = resnet_feature(input)
                    fconv = fconv.mean(3).mean(2).squeeze()
                    flogit = resnet.fc(fconv)
                    fsmax = F.softmax(flogit)
                    feature_conv.append(fconv.data.cpu())
                    feature_class.append(flogit.data.cpu())
                    feature_smax.append(fsmax.data.cpu())
                feature_conv = torch.cat(feature_conv, 0)
                feature_class = torch.cat(feature_class, 0)
                feature_smax = torch.cat(feature_smax, 0)

                if mixType.find('conv') >= 0:
                    featureM = feature_conv
                elif mixType.find('smax') >= 0:
                    featureM = feature_smax
                elif mixType.find('class') >= 0:
                    featureM = feature_class
                else:
                    raise NotImplementedError

            randP = torch.randperm(len(featureM))  # random permutation
            if last == 0:
                dat.resize_(opt.mixSize, featureM.size(1))
            dat[last:last + s].copy_(featureM.index_select(0, randP[:s]))
            last += s

        torch.save(dat, outf + "/feature_" + mixType)
        return dat
Example 25
def MGGAN_main(opt):

    g = Globals()

    opt.workers = 2
    opt.batchSize = 64
    opt.imageSize = 64
    nc = 1 if opt.data.startswith("mnist") else 3
    opt.nz = 100
    opt.ngf = 64
    opt.ndf = 64
    opt.niter = 30
    opt.lr = 0.0002
    opt.beta1 = 0.5
    opt.cuda = True
    opt.ngpu = 1
    opt.netG = ''
    opt.netD = ''
    opt.outf = g.default_model_dir + "MGGAN/"
    opt.manualSeed = None

    opt = addDataInfo(opt)
    opt.outf = opt.outf + opt.data + "/"
    print_prop(opt)

    try:
        os.makedirs(opt.outf)
    except OSError:
        pass
    if os.path.exists(opt.outf + "/mark"):
        print("Already generated before. Now exit.")
        return

    if opt.manualSeed is None:
        opt.manualSeed = random.randint(1, 10000)
    print("Random Seed: ", opt.manualSeed)
    random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)
    if opt.cuda:
        torch.cuda.manual_seed_all(opt.manualSeed)

    cudnn.benchmark = True

    dataset, dataloader = getDataSet(opt)

    ngpu = int(opt.ngpu)
    nz = int(opt.nz)
    ngf = int(opt.ngf)
    ndf = int(opt.ndf)

    # custom weights initialization called on netG and netD
    def weights_init(m):
        classname = m.__class__.__name__
        if classname.find('Conv') != -1:
            m.weight.data.normal_(0.0, 0.02)
        elif classname.find('BatchNorm') != -1:
            m.weight.data.normal_(1.0, 0.02)
            m.bias.data.fill_(0)

    netG = DCGAN_G(nz, nc, ngf)
    netG.apply(weights_init)
    if opt.netG != '':
        netG.load_state_dict(torch.load(opt.netG))
    print(netG)

    nloss = 200

    class _netD(nn.Module):

        def __init__(self, ngpu):
            super(_netD, self).__init__()
            self.ngpu = ngpu
            self.main = nn.Sequential(
                # input is (nc) x 64 x 64
                nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf) x 32 x 32
                nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
                nn.BatchNorm2d(ndf * 2),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf*2) x 16 x 16
                nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
                nn.BatchNorm2d(ndf * 4),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf*4) x 8 x 8
                nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False)
            )
            self.main2 = nn.Sequential(
                # state size. (ndf*8) x 4 x 4
                nn.BatchNorm2d(ndf * 8),
                nn.LeakyReLU(0.2, inplace=True),
                nn.Conv2d(ndf * 8, nloss, 4, 1, 0, bias=False),
                # nn.Linear(ndf*8*4*4,nloss),
                nn.Sigmoid()
            )

        def forward(self, input):
            self.feature = self.main.forward(input)
            # output=self.main2.forward(self.feature.view(input.size(0),-1))
            output = self.main2.forward(self.feature)
            return output.view(-1, 1)

    netD = _netD(ngpu)
    netD.apply(weights_init)
    if opt.netD != '':
        netD.load_state_dict(torch.load(opt.netD))
    print(netD)

    criterion = nn.BCELoss()

    input = torch.FloatTensor(opt.batchSize, nc, opt.imageSize, opt.imageSize)
    noise = torch.FloatTensor(opt.batchSize, nz, 1, 1)
    fixed_noise = torch.FloatTensor(opt.batchSize, nz, 1, 1).normal_(0, 1)
    label = torch.FloatTensor(opt.batchSize, nloss)
    real_label = 1
    fake_label = 0

    if opt.cuda:
        netD.cuda()
        netG.cuda()
        criterion.cuda()
        input, label = input.cuda(), label.cuda()
        noise, fixed_noise = noise.cuda(), fixed_noise.cuda()

    input = Variable(input)
    label = Variable(label)
    noise = Variable(noise)
    fixed_noise = Variable(fixed_noise)

    # setup optimizer
    optimizerD = optim.Adam(netD.parameters(), lr=opt.lr,
                            betas=(opt.beta1, 0.999))
    optimizerG = optim.Adam(netG.parameters(), lr=opt.lr,
                            betas=(opt.beta1, 0.999))

    real_batch = 11
    grow_speed = 5
    for epoch in range(opt.niter):
        if epoch % grow_speed == 0:
            if real_batch > 1:
                real_batch -= 1

            real_inputs = torch.FloatTensor(
                real_batch * opt.batchSize, nc, opt.imageSize, opt.imageSize)
        pointer = 0
        for i, data in enumerate(dataloader, 0):
            real_cpu, _ = data
            batch_size = real_cpu.size(0)
            if batch_size < opt.batchSize:
                continue
            pointer = pointer % real_batch + 1

            if pointer < real_batch:  # still need to fill the batch
                # copy data
                real_inputs[
                    pointer * batch_size:(pointer + 1) * batch_size].copy_(real_cpu)
                continue
            # Done collecting! Now we can collect all the feature vectors..
            input.data.resize_(real_inputs.size()).copy_(real_inputs)
            netD(input)
            true_features = netD.feature.view(
                real_inputs.size(0), -1)  # make feature a vector

            noise.data.resize_(batch_size, nz, 1, 1)
            noise.data.normal_(0, 1)
            fake = netG(noise)
            label.data.fill_(fake_label)
            output = netD(fake.detach())
            fake_features = netD.feature.view(batch_size, -1)

            # Now pair each fake feature with a true feature;
            # solve the assignment as a linear program.
            map = solve(fake_features.data, true_features.data)
            input.data.resize_(real_cpu.size())
            for j in range(0, batch_size):
                input.data[j].copy_(real_inputs[map[j]])

            tot_mini_batch = 10
            for mini_batch in range(0, tot_mini_batch):
                label.data.fill_(real_label)
                netD.zero_grad()
                output = netD(input)
                errD_real = criterion(output, label)
                errD_real.backward()
                D_x = output.data.mean()

                fake = netG(noise)
                label.data.fill_(fake_label)
                output = netD(fake.detach())
                errD_fake = criterion(output, label)
                errD_fake.backward()
                D_G_z1 = output.data.mean()

                errD = errD_real + errD_fake
                optimizerD.step()

                ############################
                # (2) Update G network: maximize log(D(G(z)))
                ###########################
                netG.zero_grad()
                # fake labels are real for generator cost
                label.data.fill_(real_label)
                output = netD(fake)
                errG = criterion(output, label)
                errG.backward()
                D_G_z2 = output.data.mean()
                optimizerG.step()

                print('[%d/%d][%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)): %.4f / %.4f'
                      % (epoch, opt.niter, i, len(dataloader), mini_batch, tot_mini_batch,
                         errD.data[0], errG.data[0], D_x, D_G_z1, D_G_z2))
            saveImage(real_cpu, '%s/real_samples.png' % opt.outf)
            fake = netG(fixed_noise)
            saveImage(fake.data, '%s/fake_samples_epoch_%03d.png' %
                      (opt.outf, epoch))

        # do checkpointing
        torch.save(netG.state_dict(), '%s/netG_epoch_%d.pth' %
                   (opt.outf, epoch))
        torch.save(netD.state_dict(), '%s/netD_epoch_%d.pth' %
                   (opt.outf, epoch))
    with open(opt.outf + "/mark", "w") as f:
        f.write("")
Example 26
def MGGAN_main(opt):

    g = Globals()

    opt.workers = 2
    opt.batchSize = 64
    opt.imageSize = 64
    nc = 1 if opt.data.startswith("mnist") else 3
    opt.nz = 100
    opt.ngf = 64
    opt.ndf = 64
    opt.niter = 30
    opt.lr = 0.0002
    opt.beta1 = 0.5
    opt.cuda = True
    opt.ngpu = 1
    opt.netG = ''
    opt.netD = ''
    opt.outf = g.default_model_dir + "MGGAN/"
    opt.manualSeed = None

    opt = addDataInfo(opt)
    opt.outf = opt.outf + opt.data + "/"
    print_prop(opt)

    try:
        os.makedirs(opt.outf)
    except OSError:
        pass
    if os.path.exists(opt.outf + "/mark"):
        print("Already generated before. Now exit.")
        return

    if opt.manualSeed is None:
        opt.manualSeed = random.randint(1, 10000)
    print("Random Seed: ", opt.manualSeed)
    random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)
    if opt.cuda:
        torch.cuda.manual_seed_all(opt.manualSeed)

    cudnn.benchmark = True

    dataset, dataloader = getDataSet(opt)

    ngpu = int(opt.ngpu)
    nz = int(opt.nz)
    ngf = int(opt.ngf)
    ndf = int(opt.ndf)

    # custom weights initialization called on netG and netD
    def weights_init(m):
        classname = m.__class__.__name__
        if classname.find('Conv') != -1:
            m.weight.data.normal_(0.0, 0.02)
        elif classname.find('BatchNorm') != -1:
            m.weight.data.normal_(1.0, 0.02)
            m.bias.data.fill_(0)

    netG = DCGAN_G(nz, nc, ngf)
    netG.apply(weights_init)
    if opt.netG != '':
        netG.load_state_dict(torch.load(opt.netG))
    print(netG)

    nloss = 200

    class _netD(nn.Module):

        def __init__(self, ngpu):
            super(_netD, self).__init__()
            self.ngpu = ngpu
            self.main = nn.Sequential(
                # input is (nc) x 64 x 64
                nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf) x 32 x 32
                nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
                nn.BatchNorm2d(ndf * 2),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf*2) x 16 x 16
                nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
                nn.BatchNorm2d(ndf * 4),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf*4) x 8 x 8
                nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False)
            )
            self.main2 = nn.Sequential(
                # state size. (ndf*8) x 4 x 4
                nn.BatchNorm2d(ndf * 8),
                nn.LeakyReLU(0.2, inplace=True),
                nn.Conv2d(ndf * 8, nloss, 4, 1, 0, bias=False),
                # nn.Linear(ndf*8*4*4,nloss),
                nn.Sigmoid()
            )

        def forward(self, input):
            self.feature = self.main.forward(input)
            # output=self.main2.forward(self.feature.view(input.size(0),-1))
            output = self.main2.forward(self.feature)
            return output.view(-1, 1)

    netD = _netD(ngpu)
    netD.apply(weights_init)
    if opt.netD != '':
        netD.load_state_dict(torch.load(opt.netD))
    print(netD)

    criterion = nn.BCELoss()

    input = torch.FloatTensor(opt.batchSize, nc, opt.imageSize, opt.imageSize)
    noise = torch.FloatTensor(opt.batchSize, nz, 1, 1)
    fixed_noise = torch.FloatTensor(opt.batchSize, nz, 1, 1).normal_(0, 1)
    label = torch.FloatTensor(opt.batchSize, nloss)
    real_label = 1
    fake_label = 0

    if opt.cuda:
        netD.cuda()
        netG.cuda()
        criterion.cuda()
        input, label = input.cuda(), label.cuda()
        noise, fixed_noise = noise.cuda(), fixed_noise.cuda()

    input = Variable(input)
    label = Variable(label)
    noise = Variable(noise)
    fixed_noise = Variable(fixed_noise)

    # setup optimizer
    optimizerD = optim.Adam(netD.parameters(), lr=opt.lr,
                            betas=(opt.beta1, 0.999))
    optimizerG = optim.Adam(netG.parameters(), lr=opt.lr,
                            betas=(opt.beta1, 0.999))

    real_batch = 11
    grow_speed = 5
    for epoch in range(opt.niter):
        if epoch % grow_speed == 0:
            if real_batch > 1:
                real_batch -= 1

            real_inputs = torch.FloatTensor(
                real_batch * opt.batchSize, nc, opt.imageSize, opt.imageSize)
        pointer = 0
        for i, data in enumerate(dataloader, 0):
            real_cpu, _ = data
            batch_size = real_cpu.size(0)
            if batch_size < opt.batchSize:
                continue
            pointer = pointer % real_batch + 1
            # copy data into the next free slot (pointer runs from 1 to real_batch)
            real_inputs[
                (pointer - 1) * batch_size:pointer * batch_size].copy_(real_cpu)

            if pointer < real_batch:  # still need to fill the batch
                continue
            # Done collecting real images. Now extract their feature vectors.
            input.data.resize_(real_inputs.size()).copy_(real_inputs)
            netD(input)  # forward pass stores the activations in netD.feature
            true_features = netD.feature.view(
                real_inputs.size(0), -1)  # flatten the features into vectors

            noise.data.resize_(batch_size, nz, 1, 1)
            noise.data.normal_(0, 1)
            fake = netG(noise)
            label.data.fill_(fake_label)
            output = netD(fake.detach())
            fake_features = netD.feature.view(batch_size, -1)

            # Pair each fake feature with a real feature by solving a linear
            # assignment (LP) problem over the two feature sets.
            map = solve(fake_features.data, true_features.data)
            input.data.resize_(real_cpu.size())
            for j in range(0, batch_size):
                input.data[j].copy_(real_inputs[map[j]])

            tot_mini_batch = 10
            for mini_batch in range(0, tot_mini_batch):
                label.data.fill_(real_label)
                netD.zero_grad()
                output = netD(input)
                errD_real = criterion(output, label)
                errD_real.backward()
                D_x = output.data.mean()

                fake = netG(noise)
                label.data.fill_(fake_label)
                output = netD(fake.detach())
                errD_fake = criterion(output, label)
                errD_fake.backward()
                D_G_z1 = output.data.mean()

                errD = errD_real + errD_fake
                optimizerD.step()

                ############################
                # (2) Update G network: maximize log(D(G(z)))
                ###########################
                netG.zero_grad()
                # fake labels are real for generator cost
                label.data.fill_(real_label)
                output = netD(fake)
                errG = criterion(output, label)
                errG.backward()
                D_G_z2 = output.data.mean()
                optimizerG.step()

                print('[%d/%d][%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)): %.4f / %.4f'
                      % (epoch, opt.niter, i, len(dataloader), mini_batch, tot_mini_batch,
                         errD.data[0], errG.data[0], D_x, D_G_z1, D_G_z2))
            saveImage(real_cpu, '%s/real_samples.png' % opt.outf)
            fake = netG(fixed_noise)
            saveImage(fake.data, '%s/fake_samples_epoch_%03d.png' %
                      (opt.outf, epoch))

        # do checkpointing
        torch.save(netG.state_dict(), '%s/netG_epoch_%d.pth' %
                   (opt.outf, epoch))
        torch.save(netD.state_dict(), '%s/netD_epoch_%d.pth' %
                   (opt.outf, epoch))
    with open(opt.outf + "/mark", "w") as f:
        f.write("")
def DCGAN_cluster_main(opt):
    g = Globals()

    N_CLUSTER = 200
    cluster = np.load('/scratch/ys646/gan/features/celeba/clus.npy')

    opt.batchSize = 64
    opt.imageSize = 64
    opt.nz = 100
    opt.ngf = 64
    opt.ndf = 64
    opt.niter = 50
    opt.lr = 0.0002
    opt.beta1 = 0.5
    opt.cuda = True
    opt.ngpu = 1
    opt.netG = ''
    opt.netD = ''
    opt.outf = g.default_model_dir + "DCGAN/"
    opt.manualSeed = None

    opt = addDataInfo(opt)
    opt.outf = '/scratch/ys646/gan/results/'
    opt.outf = opt.outf + opt.data + "/"
    print_prop(opt)

    try:
        os.makedirs(opt.outf)
    except OSError:
        pass
    if os.path.exists(opt.outf + "/mark"):
        print("Already generated before. Now exit.")
        return

    if opt.manualSeed is None:
        opt.manualSeed = random.randint(1, 10000)
    print("Random Seed: ", opt.manualSeed)
    random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)
    if opt.cuda:
        torch.cuda.manual_seed_all(opt.manualSeed)

    cudnn.benchmark = True

    if torch.cuda.is_available() and not opt.cuda:
        print("WARNING: You have a CUDA device, so you should probably run with --cuda")

    # option 1: just don't shuffle, use a counter for indices
    # this is important to keep orders fixed
    opt.workers = 1
    dataset, dataloader = getDataSet(opt, needShuf=False)
    # option 2: shuffle but use a modified dataloader that also output indices

    ngpu = int(opt.ngpu)
    nz = int(opt.nz)
    ngf = int(opt.ngf)
    ndf = int(opt.ndf)
    nc = 1 if opt.data.startswith("mnist") else 3

    # custom weights initialization called on netG and netD
    def weights_init(m):
        classname = m.__class__.__name__
        if classname.find('Conv') != -1:
            m.weight.data.normal_(0.0, 0.02)
        elif classname.find('BatchNorm') != -1:
            m.weight.data.normal_(1.0, 0.02)
            m.bias.data.fill_(0)

    netG = DCGAN_G(nz + N_CLUSTER, nc, ngf)
    netG.apply(weights_init)
    if opt.netG != '':
        netG.load_state_dict(torch.load(opt.netG))
    print(netG)

    class _netD(nn.Module):

        def __init__(self, ngpu):
            super(_netD, self).__init__()
            self.ngpu = ngpu
            self.main = nn.Sequential(
                # input is (nc) x 64 x 64
                # nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
                # extra channel for input embedding
                nn.Conv2d(nc + 1, ndf, 4, 2, 1, bias=False),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf) x 32 x 32
                nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
                nn.BatchNorm2d(ndf * 2),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf*2) x 16 x 16
                nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
                nn.BatchNorm2d(ndf * 4),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf*4) x 8 x 8
                nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
                nn.BatchNorm2d(ndf * 8),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf*8) x 4 x 4
                nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),
                nn.Sigmoid()
            )
            self.emb = nn.Embedding(N_CLUSTER, opt.imageSize * opt.imageSize)

        def forward(self, input, clus_var):
            out_emb = self.emb.forward(clus_var)
            out_emb = out_emb.view(-1, 1, 64, 64)
            new_input = torch.cat([out_emb, input], 1)
            output = self.main.forward(new_input)
            return output.view(-1, 1)

    netD = _netD(ngpu)
    netD.apply(weights_init)
    if opt.netD != '':
        netD.load_state_dict(torch.load(opt.netD))
    print(netD)

    criterion = nn.BCELoss()

    input = torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize)
    noise = torch.FloatTensor(opt.batchSize, nz, 1, 1)
    fixed_noise = torch.FloatTensor(opt.batchSize, nz, 1, 1).normal_(0, 1)
    label = torch.FloatTensor(opt.batchSize)
    real_label = 1
    fake_label = 0

    if opt.cuda:
        netD.cuda()
        netG.cuda()
        criterion.cuda()
        input, label = input.cuda(), label.cuda()
        noise, fixed_noise = noise.cuda(), fixed_noise.cuda()

    input = Variable(input)
    label = Variable(label)
    noise = Variable(noise)
    fixed_noise = Variable(fixed_noise)

    # setup optimizer
    optimizerD = optim.Adam(netD.parameters(), lr=opt.lr,
                            betas=(opt.beta1, 0.999))
    optimizerG = optim.Adam(netG.parameters(), lr=opt.lr,
                            betas=(opt.beta1, 0.999))

    for epoch in range(opt.niter):
        counter = 0
        for i, data in enumerate(dataloader, 0):
            ############################
            # (0) Get the corresponding clusters
            ###########################
            batch_size = data[1].size(0)
            clus_batch = cluster[counter:counter + batch_size]
            clus_batch = torch.from_numpy(clus_batch).long()
            counter = counter + batch_size

            clus_var = Variable(clus_batch.cuda(), requires_grad=False)
            oh = torch.FloatTensor(batch_size, N_CLUSTER)
            oh.zero_()
            oh.scatter_(1, clus_batch.view(-1, 1), 1)
            oh_var = Variable(oh.cuda(), requires_grad=False)

            ############################
            # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
            ###########################
            # train with real
            netD.zero_grad()
            real_cpu, _ = data
            input.data.resize_(real_cpu.size()).copy_(real_cpu)
            label.data.resize_(batch_size).fill_(real_label)

            output = netD(input, clus_var)
            errD_real = criterion(output, label)
            errD_real.backward()
            D_x = output.data.mean()

            # train with fake
            noise.data.resize_(batch_size, nz, 1, 1)
            noise.data.normal_(0, 1)
            # pad noise with one hot
            fake = netG(torch.cat([noise, oh_var], 1))

            label.data.fill_(fake_label)
            output = netD(fake.detach(), clus_var)
            errD_fake = criterion(output, label)
            errD_fake.backward()
            D_G_z1 = output.data.mean()
            errD = errD_real + errD_fake
            optimizerD.step()

            ############################
            # (2) Update G network: maximize log(D(G(z)))
            ###########################
            netG.zero_grad()
            # fake labels are real for generator cost
            label.data.fill_(real_label)
            output = netD(fake, clus_var)
            errG = criterion(output, label)
            errG.backward()
            D_G_z2 = output.data.mean()
            optimizerG.step()

            print('[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)): %.4f / %.4f'
                  % (epoch, opt.niter, i, len(dataloader),
                     errD.data[0], errG.data[0], D_x, D_G_z1, D_G_z2))
            if i % 100 == 0:
                saveImage(real_cpu, '%s/real_samples.png' % opt.outf)
                fake = netG(torch.cat([fixed_noise, oh_var], 1))
                saveImage(fake.data, '%s/fake_samples_epoch_%03d.png' %
                          (opt.outf, epoch))

        # do checkpointing
        torch.save(netG.state_dict(), '%s/netG_epoch_%d.pth' %
                   (opt.outf, epoch))
        torch.save(netD.state_dict(), '%s/netD_epoch_%d.pth' %
                   (opt.outf, epoch))

    with open(opt.outf + "/mark", "w") as f:
        f.write("")
Esempio n. 28
0
    timestart = time.perf_counter()
    z_buffer = np.zeros((width, height))
    for f in m.faces:
        v = np.array([map_to_screen(m.vertices[f[i][0]]) for i in range(3)])
        t = np.array([m.tex[f[i][1]] for i in range(3)]).T

        # texture mapping
        def shader(bc):
            tc = np.tensordot(t, bc, axes=(1, 2))
            tc = tc.transpose(1, 2, 0) * np.array(texture.shape[:2])
            tc = np.clip(tc.astype(np.uint32), [0, 0], [
                         texture.shape[0]-1,  texture.shape[1]-1])
            tc = tc.transpose(2, 0, 1)
            return texture[tc.tolist()]

        triangle(v, image, z_buffer, shader)

    timeend = time.perf_counter()
    print("Render image :: ", (timeend - timestart), "s")

    timestart = time.perf_counter()
    utils.saveImage("out/6_camera.jpg", image)
    utils.saveImage("out/6_camera_zbuffer.jpg", z_buffer / depth)
    print(z_buffer.min(), z_buffer.max())
    timeend = time.perf_counter()
    print("Save to file :: ", (timeend - timestart), "s")

    window = MainWindow(width, height)
    window.showImage(image)
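
# The `triangle` rasterizer used in the rendering examples is not included in
# this snippet. The sketch below shows the underlying idea as an assumption:
# for every pixel in the triangle's bounding box, compute barycentric
# coordinates, skip pixels outside the triangle, depth-test against the
# z-buffer, and ask the shader for a colour. (The original shader appears to
# receive a whole grid of barycentric weights at once; this sketch stays
# per-pixel for clarity.)
import numpy as np


def triangle_sketch(v, image, z_buffer, shader):
    # v: (3, 3) array of screen-space vertices (x, y, z)
    xs, ys = v[:, 0], v[:, 1]
    x0, x1 = int(np.floor(xs.min())), int(np.ceil(xs.max()))
    y0, y1 = int(np.floor(ys.min())), int(np.ceil(ys.max()))
    T = np.array([[xs[0] - xs[2], xs[1] - xs[2]],
                  [ys[0] - ys[2], ys[1] - ys[2]]])
    if abs(np.linalg.det(T)) < 1e-9:
        return  # degenerate triangle, nothing to draw
    Tinv = np.linalg.inv(T)
    for x in range(max(x0, 0), min(x1 + 1, z_buffer.shape[0])):
        for y in range(max(y0, 0), min(y1 + 1, z_buffer.shape[1])):
            l0, l1 = Tinv.dot([x - xs[2], y - ys[2]])
            bc = np.array([l0, l1, 1.0 - l0 - l1])
            if (bc < 0).any():
                continue  # pixel lies outside the triangle
            z = bc.dot(v[:, 2])
            if z > z_buffer[x, y]:      # assumes larger z means closer
                z_buffer[x, y] = z
                image[x, y] = shader(bc)
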
Esempio n. 29
0
    args = parser.parse_args()

    labelPath = args.i
    outputPath = os.path.dirname(args.i)

    scene = objs.Scene()
    utils.loadLabelByJson(labelPath, scene)
    scene.normalize()

    mapSize = [512, 1024, 3]

    #edgeMap = utils.genLayoutEdgeMap(scene, mapSize)
    #utils.saveImage(edgeMap, os.path.join(outputPath, 'edge.png'))

    #normalMap = utils.genLayoutNormalMap(scene, mapSize)
    #utils.saveImage(normalMap, os.path.join(outputPath, 'normal.png'))

    #depthMap = utils.genLayoutDepthMap(scene, mapSize)
    #utils.saveDepth(depthMap, os.path.join(outputPath, 'depth.png'))

    #obj2dMap = utils.genLayoutObj2dMap(scene, mapSize)
    #utils.saveImage(obj2dMap, os.path.join(outputPath, 'obj2d.png'))

    fcMap = utils.genLayoutFloorCeilingMap(scene, [512, 1024])
    utils.saveImage(fcMap, os.path.join(outputPath, 'fcmap.png'))

    corMap = utils.genLayoutCornerMap(scene, [512, 1024], dilat=4, blur=20)
    utils.saveImage(corMap, os.path.join(outputPath, 'cor.png'))

    edgMap = utils.genLayoutEdgeMap(scene, [512, 1024, 3], dilat=4, blur=20)
    utils.saveImage(edgMap, os.path.join(outputPath, 'edg_b.png'))
def folder_sampler(opt):
    opt.workers = 2
    opt.imageSize = 64
    opt.batchSize = 600
    opt.outTrueA = 'true/'
    opt.outTrueB = 'true_test/'
    opt.outTrueC = 'true_test2/'
    opt.outf = g.default_repo_dir

    opt = addDataInfo(opt)
    assert (opt.batchSize % 3 == 0)

    print_prop(opt)
    opt.outTrueA = opt.outf + "samples/" + opt.data + "/" + opt.outTrueA
    opt.outTrueB = opt.outf + "samples/" + opt.data + "/" + opt.outTrueB
    opt.outTrueC = opt.outf + "samples/" + opt.data + "/" + opt.outTrueC
    folderList = [opt.outTrueA, opt.outTrueB, opt.outTrueC]

    if (os.path.exists(opt.outTrueC)):
        if (os.path.exists(opt.outTrueC + "/mark")):  # indeed finished
            print("Sampling already finished before. Now pass.")
            for f in folderList:
                saveFeature(f, opt, opt.feature_model)
            return
        else:
            print("Partially finished. Now rerun. ")

    mkdir(opt.outf + "samples")
    mkdir(opt.outf + "samples/" + opt.data)
    mkdir(opt.outTrueA)
    mkdir(opt.outTrueB)
    mkdir(opt.outTrueC)

    opt.manualSeed = random.randint(1, 10000)  # fix seed
    print("Random Seed: ", opt.manualSeed)
    random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)

    dataset, dataloader = getDataSet(opt)

    assert (len(dataset) >= opt.sampleSize * 3)

    def giveName(iter):  # 7 digit name.
        ans = str(iter)
        return '0' * (7 - len(ans)) + ans

    iter = 0
    subfolder = -1
    splits = len(folderList)

    for i, data in enumerate(dataloader, 0):
        img, _ = data
        if i % splits == 0:
            subfolder += 1
        for j in range(0, len(img)):
            curFolder = folderList[j % splits]
            mkdir(curFolder + str(subfolder))
            if iter >= splits * opt.sampleSize:
                break
            saveImage(
                img[j],
                curFolder + str(subfolder) + "/" + giveName(iter) + ".png")
            iter += 1
        if iter >= splits * opt.sampleSize:
            break

    for f in folderList:
        saveFeature(f, opt, opt.feature_model)
        peek(opt.data, os.path.relpath(f, opt.outf + "samples/" + opt.data))

    for folder in folderList:
        with open(folder + "/mark", "w") as f:
            f.write("")
def WGAN_main(opt):
    g = Globals()

    opt.workers = 2
    opt.batchSize = 64
    opt.imageSize = 64
    opt.nz = 100
    opt.ngf = 64
    opt.ndf = 64
    opt.niter = 50
    opt.lrD = 0.00005
    opt.lrG = 0.00005
    opt.beta1 = 0.5
    opt.cuda = True
    opt.ngpu = 1
    opt.netG = ''
    opt.netD = ''
    opt.clamp_lower = -0.01
    opt.clamp_upper = 0.01
    opt.Diters = 5
    opt.n_extra_layers = 0
    opt.outf = g.default_model_dir + "WGAN/"
    opt.adam = False

    opt = addDataInfo(opt)
    opt.outf = opt.outf + opt.data + "/"
    print_prop(opt)

    if os.path.exists(opt.outf + "/mark"):
        print("Already generated before. Now exit.")
        return

    if opt.outf is None:
        opt.outf = 'samples'
    os.system('mkdir {0}'.format(opt.outf))

    opt.manualSeed = random.randint(1, 10000)  # fix seed
    print("Random Seed: ", opt.manualSeed)
    random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)

    cudnn.benchmark = True

    if torch.cuda.is_available() and not opt.cuda:
        print("WARNING: You have a CUDA device, so you should probably run with --cuda")

    dataset, dataloader = getDataSet(opt)

    ngpu = int(opt.ngpu)
    nz = int(opt.nz)
    ngf = int(opt.ngf)
    ndf = int(opt.ndf)
    nc = 1 if opt.data.startswith("mnist") else 3
    n_extra_layers = int(opt.n_extra_layers)

    # custom weights initialization called on netG and netD
    def weights_init(m):
        classname = m.__class__.__name__
        if classname.find('Conv') != -1:
            m.weight.data.normal_(0.0, 0.02)
        elif classname.find('BatchNorm') != -1:
            m.weight.data.normal_(1.0, 0.02)
            m.bias.data.fill_(0)

    netG = WGAN_G(opt.imageSize, nz, nc, ngf, ngpu, n_extra_layers)

    netG.apply(weights_init)
    if opt.netG != '':  # load checkpoint if needed
        netG.load_state_dict(torch.load(opt.netG))
    print(netG)

    netD = WGAN_D(opt.imageSize, nz, nc, ndf, ngpu, n_extra_layers)
    netD.apply(weights_init)

    if opt.netD != '':
        netD.load_state_dict(torch.load(opt.netD))
    print(netD)

    input = torch.FloatTensor(opt.batchSize, nc, opt.imageSize, opt.imageSize)
    noise = torch.FloatTensor(opt.batchSize, nz, 1, 1)
    fixed_noise = torch.FloatTensor(opt.batchSize, nz, 1, 1).normal_(0, 1)
    one = torch.FloatTensor([1])
    mone = one * -1

    if opt.cuda:
        netD.cuda()
        netG.cuda()
        input = input.cuda()
        one, mone = one.cuda(), mone.cuda()
        noise, fixed_noise = noise.cuda(), fixed_noise.cuda()

    # setup optimizer
    if opt.adam:
        optimizerD = optim.Adam(
            netD.parameters(), lr=opt.lrD, betas=(opt.beta1, 0.999))
        optimizerG = optim.Adam(
            netG.parameters(), lr=opt.lrG, betas=(opt.beta1, 0.999))
    else:
        optimizerD = optim.RMSprop(netD.parameters(), lr=opt.lrD)
        optimizerG = optim.RMSprop(netG.parameters(), lr=opt.lrG)

    gen_iterations = 0
    for epoch in range(opt.niter):
        data_iter = iter(dataloader)
        i = 0
        while i < len(dataloader):
            ############################
            # (1) Update D network
            ###########################
            for p in netD.parameters():  # reset requires_grad
                p.requires_grad = True  # they are set to False below in netG update

            # train the discriminator Diters times
            if gen_iterations < 25 or gen_iterations % 500 == 0:
                Diters = 100
            else:
                Diters = opt.Diters
            j = 0
            while j < Diters and i < len(dataloader):
                j += 1

                # clamp parameters to a cube
                for p in netD.parameters():
                    p.data.clamp_(opt.clamp_lower, opt.clamp_upper)

                data = next(data_iter)
                i += 1

                # train with real
                real_cpu, _ = data
                netD.zero_grad()
                batch_size = real_cpu.size(0)

                if opt.cuda:
                    real_cpu = real_cpu.cuda()
                input.resize_as_(real_cpu).copy_(real_cpu)
                inputv = Variable(input)

                errD_real = netD(inputv)
                errD_real.backward(one)

                # train with fake
                noise.resize_(opt.batchSize, nz, 1, 1).normal_(0, 1)
                noisev = Variable(noise, volatile=True)  # totally freeze netG
                fake = Variable(netG(noisev).data)
                inputv = fake
                errD_fake = netD(inputv)
                errD_fake.backward(mone)
                errD = errD_real - errD_fake
                optimizerD.step()

            ############################
            # (2) Update G network
            ###########################
            for p in netD.parameters():
                p.requires_grad = False  # to avoid computation
            netG.zero_grad()
            # in case our last batch was the tail batch of the dataloader,
            # make sure we feed a full batch of noise
            noise.resize_(opt.batchSize, nz, 1, 1).normal_(0, 1)
            noisev = Variable(noise)
            fake = netG(noisev)
            errG = netD(fake)
            errG.backward(one)
            optimizerG.step()
            gen_iterations += 1

            print('[%d/%d][%d/%d][%d] Loss_D: %f Loss_G: %f Loss_D_real: %f Loss_D_fake %f'
                  % (epoch, opt.niter, i, len(dataloader), gen_iterations,
                     errD.data[0], errG.data[0], errD_real.data[0], errD_fake.data[0]))
            if gen_iterations % 50 == 0:
                saveImage(real_cpu, '{0}/real_samples.png'.format(opt.outf))
                fake = netG(Variable(fixed_noise, volatile=True))
                saveImage(
                    fake.data, '{0}/fake_samples_{1}.png'.format(opt.outf, gen_iterations))

        # do checkpointing
        torch.save(netG.state_dict(),
                   '{0}/netG_epoch_{1}.pth'.format(opt.outf, epoch))
        torch.save(netD.state_dict(),
                   '{0}/netD_epoch_{1}.pth'.format(opt.outf, epoch))
    with open(opt.outf + "/mark", "w") as f:
        f.write("")
Esempio n. 32
0
def NNGAN_main(opt):
    g = Globals()

    opt.workers = 2
    opt.batchSize = 64
    opt.imageSize = 64
    opt.nz = 100
    opt.ngf = 64
    opt.ndf = 64
    opt.niter = 50
    opt.lr = 0.0002
    opt.beta1 = 0.5
    opt.cuda = True
    opt.ngpu = 1
    opt.netG = ''
    opt.netF = ''
    opt.netC = ''
    opt.outf = g.default_model_dir + "NNGAN/"
    opt.manualSeed = None

    opt = addDataInfo(opt)
    opt.outf = opt.outf + opt.data + "/"
    print_prop(opt)

    try:
        os.makedirs(opt.outf)
    except OSError:
        pass

    if opt.manualSeed is None:
        opt.manualSeed = random.randint(1, 10000)
    print("Random Seed: ", opt.manualSeed)
    random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)
    if opt.cuda:
        torch.cuda.manual_seed_all(opt.manualSeed)

    if os.path.exists(opt.outf + "/mark"):
        print("Already generated before. Now exit.")
        return

    cudnn.benchmark = True

    dataset, dataloader = getDataSet(opt, needShuf=False)

    nz = int(opt.nz)
    ngf = int(opt.ngf)
    ndf = int(opt.ndf)
    nc = 1 if opt.data.startswith("mnist") else 3

    # custom weights initialization called on netG and netD
    def weights_init(m):
        classname = m.__class__.__name__
        if classname.find('Conv') != -1:
            m.weight.data.normal_(0.0, 0.02)
        elif classname.find('BatchNorm') != -1:
            m.weight.data.normal_(1.0, 0.02)
            m.bias.data.fill_(0)

    netG = DCGAN_G(100, nc, 64)
    netG.apply(weights_init)
    if opt.netG != '':
        netG.load_state_dict(torch.load(opt.netG))
        print("Load netg")
    print(netG)

    class _netFeature(nn.Module):

        def __init__(self):
            super(_netFeature, self).__init__()
            self.main = nn.Sequential(
                # input is (nc) x 64 x 64
                nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf) x 32 x 32
                nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
                nn.BatchNorm2d(ndf * 2),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf*2) x 16 x 16
                nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
                nn.BatchNorm2d(ndf * 4),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf*4) x 8 x 8
                nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
                nn.BatchNorm2d(ndf * 8),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf*8) x 4 x 4
            )

        def forward(self, input):
            output = self.main.forward(input).view(input.size(0), -1)
            # outputN=torch.norm(output,2,1)
            # return output/(outputN.expand_as(output))
            return output

    class _netCv(nn.Module):

        def __init__(self):
            super(_netCv, self).__init__()
            self.main = nn.Sequential(
                nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),
                nn.Sigmoid()
            )

        def forward(self, input):
            return self.main(input.view(input.size(0), 512, 4, 4)).view(-1, 1)

    netF = _netFeature()
    netF.apply(weights_init)
    print(netF)
    netC = _netCv()
    netC.apply(weights_init)
    print(netC)
    if opt.netF != '':
        netF.load_state_dict(torch.load(opt.netF))
        print("Load netf")
    if opt.netC != '':
        netC.load_state_dict(torch.load(opt.netC))
        print("Load netc")

    criterion = nn.BCELoss()

    core_batch = 64
    input = torch.FloatTensor(opt.batchSize, nc, opt.imageSize, opt.imageSize)
    noise = torch.FloatTensor(opt.batchSize, nz, 1, 1)
    fixed_noise = torch.FloatTensor(64, nz, 1, 1).normal_(0, 1)
    label = torch.FloatTensor(core_batch)
    real_label = 1
    fake_label = 0

    if opt.cuda:
        netF.cuda()
        netC.cuda()
        netG.cuda()
        criterion.cuda()
        input, label = input.cuda(), label.cuda()
        noise, fixed_noise = noise.cuda(), fixed_noise.cuda()

    input = Variable(input)
    label = Variable(label)
    noise = Variable(noise)
    fixed_noise = Variable(fixed_noise)

    # setup optimizer
    optimizerF = optim.Adam(netF.parameters(), lr=opt.lr,
                            betas=(opt.beta1, 0.999))
    optimizerC = optim.Adam(netC.parameters(), lr=opt.lr,
                            betas=(opt.beta1, 0.999))
    optimizerG = optim.Adam(netG.parameters(), lr=opt.lr,
                            betas=(opt.beta1, 0.999))

    core_input = Variable(torch.FloatTensor(
        core_batch, nc, opt.imageSize, opt.imageSize).cuda())

    for epoch in range(opt.niter):

        for i, data in enumerate(dataloader, 0):
            ############################
            # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
            ###########################
            netF.zero_grad()
            netC.zero_grad()

            noise.data.resize_(core_batch, nz, 1, 1)
            noise.data.normal_(0, 1)
            fake = netG(noise)
            label.data.resize_(core_batch).fill_(fake_label)
            fake_features = netF(fake.detach())
            output = netC(fake_features)
            errD_fake = criterion(output, label)
            errD_fake.backward()
            D_G_z1 = output.data.mean()

            real_cpu, _ = data
            # We only do full mini-batches, ignore the last mini-batch
            if (real_cpu.size(0) < opt.batchSize):
                print("Skip small mini batch!")
                continue
            input.data.resize_(real_cpu.size()).copy_(real_cpu)
            true_features = netF(input)
            M = distance(fake_features.data.view(fake_features.size(
                0), -1), true_features.data.view(real_cpu.size(0), -1), False)
            # get the specific neighbors of features in F_true
            _, fake_true_neighbors = torch.min(M, 1)
            unique_nn = np.unique(fake_true_neighbors.numpy()).size
            core_input.data.copy_(torch.index_select(
                real_cpu, 0, fake_true_neighbors.view(-1)))

            true_features = netF(core_input)
            output = netC(true_features)
            label.data.resize_(core_batch).fill_(real_label)
            errD_real = criterion(output, label)
            errD_real.backward()
            D_x = output.data.mean()

            errD = errD_real + errD_fake
            optimizerF.step()
            optimizerC.step()

            ############################
            # (2) Update G network: DCGAN
            ###########################

            netG.zero_grad()

            # fake labels are real for generator cost
            label.data.fill_(real_label)
            fake_features = netF(fake)
            output = netC(fake_features)
            errG = criterion(output, label)
            errG.backward()
            D_G_z2 = output.data.mean()
            optimizerG.step()

            print('[%d/%d][%d/%d] Loss_D: %.4f D(x): %.4f D(G(z)): %.4f, %.4f unique=%d'
                  % (epoch, opt.niter, i, len(dataloader),
                     errD.data[0], D_x, D_G_z1, D_G_z2, unique_nn))

            if i % 50 == 0:
                saveImage(real_cpu[0:64], '%s/real_samples.png' % opt.outf)
                fake = netG(fixed_noise)
                saveImage(fake.data, '%s/fake_samples_epoch_%03d.png' %
                          (opt.outf, epoch))

        # do checkpointing
        torch.save(netG.state_dict(), '%s/netG_epoch_%d.pth' %
                   (opt.outf, epoch))
        torch.save(netF.state_dict(), '%s/netF_epoch_%d.pth' %
                   (opt.outf, epoch))
        torch.save(netC.state_dict(), '%s/netC_epoch_%d.pth' %
                   (opt.outf, epoch))
    with open(opt.outf + "/mark", "w") as f:
        f.write("")
def DCGAN_main(opt):
    g = Globals()

    opt.workers = 2
    opt.batchSize = 128
    opt.imageSize = 64
    opt.nz = 100
    opt.ngf = 64
    opt.ndf = 64
    opt.niter = 50
    opt.lr = 0.0002
    opt.beta1 = 0.5
    opt.cuda = True
    opt.ngpu = 1
    opt.netG = ''
    opt.netD = ''
    opt.outf = g.default_model_dir + "DCGAN/"
    opt.manualSeed = None

    opt = addDataInfo(opt)
    opt.outf = opt.outf + opt.data + "/"
    print_prop(opt)

    try:
        os.makedirs(opt.outf)
    except OSError:
        pass
    if os.path.exists(opt.outf + "/mark"):
        print("Already generated before. Now exit.")
        return

    if opt.manualSeed is None:
        opt.manualSeed = random.randint(1, 10000)
    print("Random Seed: ", opt.manualSeed)
    random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)
    if opt.cuda:
        torch.cuda.manual_seed_all(opt.manualSeed)

    cudnn.benchmark = True

    if torch.cuda.is_available() and not opt.cuda:
        print("WARNING: You have a CUDA device, so you should probably run with --cuda")

    dataset, dataloader = getDataSet(opt)

    ngpu = int(opt.ngpu)
    nz = int(opt.nz)
    ngf = int(opt.ngf)
    ndf = int(opt.ndf)
    nc = 1 if opt.data.startswith("mnist") else 3

    # custom weights initialization called on netG and netD
    def weights_init(m):
        classname = m.__class__.__name__
        if classname.find('Conv') != -1:
            m.weight.data.normal_(0.0, 0.02)
        elif classname.find('BatchNorm') != -1:
            m.weight.data.normal_(1.0, 0.02)
            m.bias.data.fill_(0)

    netG = DCGAN_G(nz, nc, ngf)
    netG.apply(weights_init)
    if opt.netG != '':
        netG.load_state_dict(torch.load(opt.netG))
    print(netG)

    netD = DCGAN_D(ngpu)
    netD.apply(weights_init)
    if opt.netD != '':
        netD.load_state_dict(torch.load(opt.netD))
    print(netD)

    criterion = nn.BCELoss()

    input = torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize)
    noise = torch.FloatTensor(opt.batchSize, nz, 1, 1)
    fixed_noise = torch.FloatTensor(opt.batchSize, nz, 1, 1).normal_(0, 1)
    label = torch.FloatTensor(opt.batchSize)
    real_label = 1
    fake_label = 0

    if opt.cuda:
        netD.cuda()
        netG.cuda()
        criterion.cuda()
        input, label = input.cuda(), label.cuda()
        noise, fixed_noise = noise.cuda(), fixed_noise.cuda()

    input = Variable(input)
    label = Variable(label)
    noise = Variable(noise)
    fixed_noise = Variable(fixed_noise)

    # setup optimizer
    optimizerD = optim.Adam(netD.parameters(), lr=opt.lr,
                            betas=(opt.beta1, 0.999))
    optimizerG = optim.Adam(netG.parameters(), lr=opt.lr,
                            betas=(opt.beta1, 0.999))

    for epoch in range(opt.niter):
        for i, data in enumerate(dataloader, 0):
            ############################
            # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
            ###########################
            # train with real
            netD.zero_grad()
            real_cpu, _ = data
            batch_size = real_cpu.size(0)
            input.data.resize_(real_cpu.size()).copy_(real_cpu)
            label.data.resize_(batch_size).fill_(real_label)

            output = netD(input)
            errD_real = criterion(output, label)
            errD_real.backward()
            D_x = output.data.mean()

            # train with fake
            noise.data.resize_(batch_size, nz, 1, 1)
            noise.data.normal_(0, 1)
            fake = netG(noise)
            label.data.fill_(fake_label)
            output = netD(fake.detach())
            errD_fake = criterion(output, label)
            errD_fake.backward()
            D_G_z1 = output.data.mean()
            errD = errD_real + errD_fake
            optimizerD.step()

            ############################
            # (2) Update G network: maximize log(D(G(z)))
            ###########################
            netG.zero_grad()
            # fake labels are real for generator cost
            label.data.fill_(real_label)
            output = netD(fake)
            errG = criterion(output, label)
            errG.backward()
            D_G_z2 = output.data.mean()
            optimizerG.step()

            print('[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)): %.4f / %.4f'
                  % (epoch, opt.niter, i, len(dataloader),
                     errD.data[0], errG.data[0], D_x, D_G_z1, D_G_z2))
            if i % 100 == 0:
                saveImage(real_cpu, '%s/real_samples.png' % opt.outf)
                fake = netG(fixed_noise)
                saveImage(fake.data, '%s/fake_samples_epoch_%03d.png' %
                          (opt.outf, epoch))

        # do checkpointing
        torch.save(netG.state_dict(), '%s/netG_epoch_%d.pth' %
                   (opt.outf, epoch))
        torch.save(netD.state_dict(), '%s/netD_epoch_%d.pth' %
                   (opt.outf, epoch))
    with open(opt.outf + "/mark", "w") as f:
        f.write("")
Esempio n. 34
0
def NNGAN_main(opt):
    g = Globals()

    opt.workers = 2
    opt.batchSize = 64
    opt.imageSize = 64
    opt.nz = 100
    opt.ngf = 64
    opt.ndf = 64
    opt.niter = 50
    opt.lr = 0.0002
    opt.beta1 = 0.5
    opt.cuda = True
    opt.ngpu = 1
    opt.netG = ''
    opt.netF = ''
    opt.netC = ''
    opt.outf = g.default_model_dir + "NNGAN/"
    opt.manualSeed = None

    opt = addDataInfo(opt)
    opt.outf = opt.outf + opt.data + "/"
    print_prop(opt)

    try:
        os.makedirs(opt.outf)
    except OSError:
        pass

    if opt.manualSeed is None:
        opt.manualSeed = random.randint(1, 10000)
    print("Random Seed: ", opt.manualSeed)
    random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)
    if opt.cuda:
        torch.cuda.manual_seed_all(opt.manualSeed)

    if os.path.exists(opt.outf + "/mark"):
        print("Already generated before. Now exit.")
        return

    cudnn.benchmark = True

    dataset, dataloader = getDataSet(opt, needShuf=False)

    nz = int(opt.nz)
    ngf = int(opt.ngf)
    ndf = int(opt.ndf)
    nc = 1 if opt.data.startswith("mnist") else 3

    # custom weights initialization called on netG and netD
    def weights_init(m):
        classname = m.__class__.__name__
        if classname.find('Conv') != -1:
            m.weight.data.normal_(0.0, 0.02)
        elif classname.find('BatchNorm') != -1:
            m.weight.data.normal_(1.0, 0.02)
            m.bias.data.fill_(0)

    netG = DCGAN_G(100, nc, 64)
    netG.apply(weights_init)
    if opt.netG != '':
        netG.load_state_dict(torch.load(opt.netG))
        print("Load netg")
    print(netG)

    class _netFeature(nn.Module):

        def __init__(self):
            super(_netFeature, self).__init__()
            self.main = nn.Sequential(
                # input is (nc) x 64 x 64
                nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf) x 32 x 32
                nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
                nn.BatchNorm2d(ndf * 2),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf*2) x 16 x 16
                nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
                nn.BatchNorm2d(ndf * 4),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf*4) x 8 x 8
                nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
                nn.BatchNorm2d(ndf * 8),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf*8) x 4 x 4
            )

        def forward(self, input):
            output = self.main.forward(input).view(input.size(0), -1)
            # outputN=torch.norm(output,2,1)
            # return output/(outputN.expand_as(output))
            return output

    class _netCv(nn.Module):

        def __init__(self):
            super(_netCv, self).__init__()
            self.main = nn.Sequential(
                nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),
                nn.Sigmoid()
            )

        def forward(self, input):
            return self.main(input.view(input.size(0), 512, 4, 4)).view(-1, 1)

    netF = _netFeature()
    netF.apply(weights_init)
    print(netF)
    netC = _netCv()
    netC.apply(weights_init)
    print(netC)
    if opt.netF != '':
        netF.load_state_dict(torch.load(opt.netF))
        print("Load netf")
    if opt.netC != '':
        netC.load_state_dict(torch.load(opt.netC))
        print("Load netc")

    criterion = nn.BCELoss()

    core_batch = 64
    input = torch.FloatTensor(opt.batchSize, nc, opt.imageSize, opt.imageSize)
    noise = torch.FloatTensor(opt.batchSize, nz, 1, 1)
    fixed_noise = torch.FloatTensor(64, nz, 1, 1).normal_(0, 1)
    label = torch.FloatTensor(core_batch)
    real_label = 1
    fake_label = 0

    if opt.cuda:
        netF.cuda()
        netC.cuda()
        netG.cuda()
        criterion.cuda()
        input, label = input.cuda(), label.cuda()
        noise, fixed_noise = noise.cuda(), fixed_noise.cuda()

    input = Variable(input)
    label = Variable(label)
    noise = Variable(noise)
    fixed_noise = Variable(fixed_noise)

    # setup optimizer
    optimizerF = optim.Adam(netF.parameters(), lr=opt.lr,
                            betas=(opt.beta1, 0.999))
    optimizerC = optim.Adam(netC.parameters(), lr=opt.lr,
                            betas=(opt.beta1, 0.999))
    optimizerG = optim.Adam(netG.parameters(), lr=opt.lr,
                            betas=(opt.beta1, 0.999))

    core_input = Variable(torch.FloatTensor(
        core_batch, nc, opt.imageSize, opt.imageSize).cuda())

    for epoch in range(opt.niter):

        for i, data in enumerate(dataloader, 0):
            ############################
            # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
            ###########################
            netF.zero_grad()
            netC.zero_grad()

            noise.data.resize_(core_batch, nz, 1, 1)
            noise.data.normal_(0, 1)
            fake = netG(noise)
            label.data.resize_(core_batch).fill_(fake_label)
            fake_features = netF(fake.detach())
            output = netC(fake_features)
            errD_fake = criterion(output, label)
            errD_fake.backward()
            D_G_z1 = output.data.mean()

            real_cpu, _ = data
            # We only do full mini-batches, ignore the last mini-batch
            if (real_cpu.size(0) < opt.batchSize):
                print("Skip small mini batch!")
                continue
            input.data.resize_(real_cpu.size()).copy_(real_cpu)
            true_features = netF(input)
            M = distance(fake_features.data.view(fake_features.size(
                0), -1), true_features.data.view(real_cpu.size(0), -1), False)
            # get the specific neighbors of features in F_true
            _, fake_true_neighbors = torch.min(M, 1)
            unique_nn = np.unique(fake_true_neighbors.numpy()).size
            core_input.data.copy_(torch.index_select(
                real_cpu, 0, fake_true_neighbors.view(-1)))

            true_features = netF(core_input)
            output = netC(true_features)
            label.data.resize_(core_batch).fill_(real_label)
            errD_real = criterion(output, label)
            errD_real.backward()
            D_x = output.data.mean()

            errD = errD_real + errD_fake
            optimizerF.step()
            optimizerC.step()

            ############################
            # (2) Update G network: DCGAN
            ###########################

            netG.zero_grad()

            # fake labels are real for generator cost
            label.data.fill_(real_label)
            fake_features = netF(fake)
            output = netC(fake_features)
            errG = criterion(output, label)
            errG.backward()
            D_G_z2 = output.data.mean()
            optimizerG.step()

            print('[%d/%d][%d/%d] Loss_D: %.4f D(x): %.4f D(G(z)): %.4f, %.4f unique=%d'
                  % (epoch, opt.niter, i, len(dataloader),
                     errD.data[0], D_x, D_G_z1, D_G_z2, unique_nn))

            if i % 50 == 0:
                saveImage(real_cpu[0:64], '%s/real_samples.png' % opt.outf)
                fake = netG(fixed_noise)
                saveImage(fake.data, '%s/fake_samples_epoch_%03d.png' %
                          (opt.outf, epoch))

        # do checkpointing
        torch.save(netG.state_dict(), '%s/netG_epoch_%d.pth' %
                   (opt.outf, epoch))
        torch.save(netF.state_dict(), '%s/netF_epoch_%d.pth' %
                   (opt.outf, epoch))
        torch.save(netC.state_dict(), '%s/netC_epoch_%d.pth' %
                   (opt.outf, epoch))
    with open(opt.outf + "/mark", "w") as f:
        f.write("")
Esempio n. 35
0
    image = utils.createImage(width, height)
    m = Model("obj/african_head.obj")
    timeend = time.perf_counter()
    print("Read model :: ", (timeend - timestart), "s")

    light_dir = np.array([0, 0, -1])

    timestart = time.perf_counter()
    z_buffer = np.zeros((width, height))
    for f in m.faces:
        v = np.array([map_to_screen(m.vertices[f[i][0]]) for i in range(3)])

        normal = np.cross(v[2] - v[0], v[1] - v[0])
        normal /= np.linalg.norm(normal)
        intensity = abs(np.dot(normal, light_dir))

        # triangle_line_sweep(v, image, utils.WHITE * intensity)
        triangle(v, image, z_buffer, lambda bc: utils.WHITE * intensity)

    timeend = time.perf_counter()
    print("Render image :: ", (timeend - timestart), "s")

    timestart = time.perf_counter()
    utils.saveImage("out/4_mesh_light.jpg", image)
    utils.saveImage("out/4_mesh_light_zbuffer.jpg", z_buffer / depth)
    timeend = time.perf_counter()
    print("Save to file :: ", (timeend - timestart), "s")

    window = MainWindow(width, height)
    window.showImage(image)
def DCGAN_cluster_main(opt):
    g = Globals()

    N_CLUSTER = 200
    cluster = np.load('/scratch/ys646/gan/features/celeba/clus.npy')

    opt.batchSize = 64
    opt.imageSize = 64
    opt.nz = 100
    opt.ngf = 64
    opt.ndf = 64
    opt.niter = 50
    opt.lr = 0.0002
    opt.beta1 = 0.5
    opt.cuda = True
    opt.ngpu = 1
    opt.netG = ''
    opt.netD = ''
    opt.outf = g.default_model_dir + "DCGAN/"
    opt.manualSeed = None

    opt = addDataInfo(opt)
    opt.outf = '/scratch/ys646/gan/results/'
    opt.outf = opt.outf + opt.data + "/"
    print_prop(opt)

    try:
        os.makedirs(opt.outf)
    except OSError:
        pass
    if os.path.exists(opt.outf + "/mark"):
        print("Already generated before. Now exit.")
        return

    if opt.manualSeed is None:
        opt.manualSeed = random.randint(1, 10000)
    print("Random Seed: ", opt.manualSeed)
    random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)
    if opt.cuda:
        torch.cuda.manual_seed_all(opt.manualSeed)

    cudnn.benchmark = True

    if torch.cuda.is_available() and not opt.cuda:
        print(
            "WARNING: You have a CUDA device, so you should probably run with --cuda"
        )

    # option 1: just don't shuffle, use a counter for indices
    # this is important to keep orders fixed
    opt.workers = 1
    dataset, dataloader = getDataSet(opt, needShuf=False)
    # option 2: shuffle but use a modified dataloader that also output indices

    ngpu = int(opt.ngpu)
    nz = int(opt.nz)
    ngf = int(opt.ngf)
    ndf = int(opt.ndf)
    nc = 1 if opt.data.startswith("mnist") else 3

    # custom weights initialization called on netG and netD
    def weights_init(m):
        classname = m.__class__.__name__
        if classname.find('Conv') != -1:
            m.weight.data.normal_(0.0, 0.02)
        elif classname.find('BatchNorm') != -1:
            m.weight.data.normal_(1.0, 0.02)
            m.bias.data.fill_(0)

    netG = DCGAN_G(nz + N_CLUSTER, nc, ngf)
    netG.apply(weights_init)
    if opt.netG != '':
        netG.load_state_dict(torch.load(opt.netG))
    print(netG)

    class _netD(nn.Module):
        def __init__(self, ngpu):
            super(_netD, self).__init__()
            self.ngpu = ngpu
            self.main = nn.Sequential(
                # input is (nc) x 64 x 64
                # nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
                # extra channel for input embedding
                nn.Conv2d(nc + 1, ndf, 4, 2, 1, bias=False),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf) x 32 x 32
                nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
                nn.BatchNorm2d(ndf * 2),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf*2) x 16 x 16
                nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
                nn.BatchNorm2d(ndf * 4),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf*4) x 8 x 8
                nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
                nn.BatchNorm2d(ndf * 8),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf*8) x 4 x 4
                nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),
                nn.Sigmoid())
            self.emb = nn.Embedding(N_CLUSTER, opt.imageSize * opt.imageSize)

        def forward(self, input, clus_var):
            out_emb = self.emb.forward(clus_var)
            out_emb = out_emb.view(-1, 1, 64, 64)
            new_input = torch.cat([out_emb, input], 1)
            output = self.main.forward(new_input)
            return output.view(-1, 1)

    netD = _netD(ngpu)
    netD.apply(weights_init)
    if opt.netD != '':
        netD.load_state_dict(torch.load(opt.netD))
    print(netD)

    criterion = nn.BCELoss()

    input = torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize)
    noise = torch.FloatTensor(opt.batchSize, nz, 1, 1)
    fixed_noise = torch.FloatTensor(opt.batchSize, nz, 1, 1).normal_(0, 1)
    label = torch.FloatTensor(opt.batchSize)
    real_label = 1
    fake_label = 0

    if opt.cuda:
        netD.cuda()
        netG.cuda()
        criterion.cuda()
        input, label = input.cuda(), label.cuda()
        noise, fixed_noise = noise.cuda(), fixed_noise.cuda()

    input = Variable(input)
    label = Variable(label)
    noise = Variable(noise)
    fixed_noise = Variable(fixed_noise)

    # setup optimizer
    optimizerD = optim.Adam(netD.parameters(),
                            lr=opt.lr,
                            betas=(opt.beta1, 0.999))
    optimizerG = optim.Adam(netG.parameters(),
                            lr=opt.lr,
                            betas=(opt.beta1, 0.999))

    for epoch in range(opt.niter):
        counter = 0
        for i, data in enumerate(dataloader, 0):
            ############################
            # (0) Get the corresponding clusters
            ###########################
            batch_size = data[1].size(0)
            clus_batch = cluster[counter:counter + batch_size]
            clus_batch = torch.from_numpy(clus_batch).long()
            counter = counter + batch_size

            clus_var = Variable(clus_batch.cuda(), requires_grad=False)
            oh = torch.FloatTensor(batch_size, N_CLUSTER)
            oh.zero_()
            oh.scatter_(1, clus_batch.view(-1, 1), 1)
            oh_var = Variable(oh.cuda(), requires_grad=False)

            ############################
            # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
            ###########################
            # train with real
            netD.zero_grad()
            real_cpu, _ = data
            input.data.resize_(real_cpu.size()).copy_(real_cpu)
            label.data.resize_(batch_size).fill_(real_label)

            output = netD(input, clus_var)
            errD_real = criterion(output, label)
            errD_real.backward()
            D_x = output.data.mean()

            # train with fake
            noise.data.resize_(batch_size, nz, 1, 1)
            noise.data.normal_(0, 1)
            # pad noise with one hot
            fake = netG(torch.cat([noise, oh_var], 1))

            label.data.fill_(fake_label)
            output = netD(fake.detach(), clus_var)
            errD_fake = criterion(output, label)
            errD_fake.backward()
            D_G_z1 = output.data.mean()
            errD = errD_real + errD_fake
            optimizerD.step()

            ############################
            # (2) Update G network: maximize log(D(G(z)))
            ###########################
            netG.zero_grad()
            # fake labels are real for generator cost
            label.data.fill_(real_label)
            output = netD(fake, clus_var)
            errG = criterion(output, label)
            errG.backward()
            D_G_z2 = output.data.mean()
            optimizerG.step()

            print(
                '[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)): %.4f / %.4f'
                % (epoch, opt.niter, i, len(dataloader), errD.data[0],
                   errG.data[0], D_x, D_G_z1, D_G_z2))
            if i % 100 == 0:
                saveImage(real_cpu, '%s/real_samples.png' % opt.outf)
                fake = netG(torch.cat([fixed_noise, oh_var], 1))
                saveImage(fake.data,
                          '%s/fake_samples_epoch_%03d.png' % (opt.outf, epoch))

        # do checkpointing
        torch.save(netG.state_dict(),
                   '%s/netG_epoch_%d.pth' % (opt.outf, epoch))
        torch.save(netD.state_dict(),
                   '%s/netD_epoch_%d.pth' % (opt.outf, epoch))

    with open(opt.outf + "/mark", "w") as f:
        f.write("")
def folder_sampler(opt):
    opt.workers = 2
    opt.imageSize = 64
    opt.batchSize = 600
    opt.outTrueA = 'true/'
    opt.outTrueB = 'true_test/'
    opt.outTrueC = 'true_test2/'
    opt.outf = g.default_repo_dir

    opt = addDataInfo(opt)
    assert(opt.batchSize % 3 == 0)

    print_prop(opt)
    opt.outTrueA = opt.outf + "samples/" + opt.data + "/" + opt.outTrueA
    opt.outTrueB = opt.outf + "samples/" + opt.data + "/" + opt.outTrueB
    opt.outTrueC = opt.outf + "samples/" + opt.data + "/" + opt.outTrueC
    folderList = [opt.outTrueA, opt.outTrueB, opt.outTrueC]

    if (os.path.exists(opt.outTrueC)):
        if (os.path.exists(opt.outTrueC + "/mark")):  # indeed finished
            print("Sampling already finished before. Now pass.")
            for f in folderList:
                saveFeature(f, opt, opt.feature_model)
            return
        else:
            print("Partially finished. Now rerun. ")

    mkdir(opt.outf + "samples")
    mkdir(opt.outf + "samples/" + opt.data)
    mkdir(opt.outTrueA)
    mkdir(opt.outTrueB)
    mkdir(opt.outTrueC)

    opt.manualSeed = random.randint(1, 10000)  # fix seed
    print("Random Seed: ", opt.manualSeed)
    random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)

    dataset, dataloader = getDataSet(opt)

    assert(len(dataset) >= opt.sampleSize * 3)

    def giveName(iter):  # 7 digit name.
        ans = str(iter)
        return '0' * (7 - len(ans)) + ans
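    # giveName(iter) is equivalent to str(iter).zfill(7)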

    iter = 0
    subfolder = -1
    splits = len(folderList)

    for i, data in enumerate(dataloader, 0):
        img, _ = data
        if i % splits == 0:
            subfolder += 1
        for j in range(0, len(img)):
            curFolder = folderList[j % splits]
            mkdir(curFolder + str(subfolder))
            if iter >= splits * opt.sampleSize:
                break
            saveImage(img[j], curFolder + str(subfolder) +
                      "/" + giveName(iter) + ".png")
            iter += 1
        if iter >= splits * opt.sampleSize:
            break

    for f in folderList:
        saveFeature(f, opt, opt.feature_model)
        peek(opt.data, os.path.relpath(f, opt.outf + "samples/" + opt.data))

    for folder in folderList:
        with open(folder + "/mark", "w") as f:
            f.write("")
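
# For reference (not from the original code), the layout produced by folder_sampler under
# opt.outf looks roughly like:
#
#   samples/<data>/true/0/0000000.png, 0000003.png, ...        (+ a "mark" file per folder)
#   samples/<data>/true_test/0/0000001.png, 0000004.png, ...
#   samples/<data>/true_test2/0/0000002.png, 0000005.png, ...
#
# with a new numbered subfolder opened every `splits` batches.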
Example n. 38
def run(conf):

    pp = pprint.PrettyPrinter()
    # pp.pprint(conf)

    # Make sure the global configuration is in place
    utils.run_global_visit_configuration(conf)

    visitConf = utils.getValueForKeyPath(conf,'postprocessing.tracks.visit')
    if not visitConf:
        print "No configuration for visuals. Nothing to do."
        return 0

    # Set up background gradient, axis labels etc.
    utils.setAnnotations(conf,'postprocessing.tracks.visit.annotationAttributes')

    # Set the view straight
    utils.setView(conf,'postprocessing.tracks.visit.view')

    # Plot the map data
    utils.plotMapdata(conf,'postprocessing.tracks.visit.map')

    # Plot the tracks
    trackPlotConf = utils.getValueForKeyPath(conf,'postprocessing.tracks.visit.track')
    # pp.pprint(trackPlotConf)

    currentDirectory = os.path.abspath(os.getcwd())
    os.chdir(conf['tracks_dir'])

    if trackPlotConf:

        # Save value of legend flag
        legendFlag = trackPlotConf['PseudocolorAttributes']['legendFlag']

        # Plot the Tracks
        # track_pattern = conf['tracks_dir'] + "/*-track_*.vtk"

        track_pattern = "*-track_*.vtk"
        trackFiles = sorted(glob.glob(track_pattern))
        print("Looking with pattern " + track_pattern)
        print("Found %d track files." % len(trackFiles))
        count = 0
        for trackFile in trackFiles:

            # add plot
            # trackFile = conf['tracks_dir'] + os.path.sep + fname

            # plot the legend for the first one only
            if (count == 1) and legendFlag:
                trackPlotConf['PseudocolorAttributes']['legendFlag'] = 0
                # pp.pprint(trackPlotConf)

            # Plot the actual track data
            file = conf['tracks_dir'] + os.path.sep + trackFile
            print "Adding plot for " + file
            utils.addPseudocolorPlot(file,trackPlotConf)

            count = count + 1

            # in case the script is being debugged, exit the script
            # after 10 tracks. This could be configured
            # if getValueForKeyPath(conf,'postprocessing.debugVisitScript') and count > 10

        # Restore flag value
        trackPlotConf['PseudocolorAttributes']['legendFlag'] = legendFlag
        # pp.pprint(trackPlotConf)

    print "Drawing plots"
    visit.DrawPlots()

    print "Saving image to %s" % os.getcwd()
    utils.saveImage("tracks",0)

    os.chdir(currentDirectory)

    return
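
# The run(conf) above drives VisIt through its Python bindings (the visit module), so it is
# typically executed with VisIt's own CLI rather than a plain interpreter, e.g.
# (script name illustrative; add -nowin for offscreen batch runs):
#
#   visit -cli -s plot_tracks.py
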
Example n. 39
    # remove the size offset between the resampled image patch and the (smaller) annotated/label patch by cropping symmetrically
    offset = (sizeOfImageCellResampled[0]-sizeOfAnnotatedCellsResampled[0])//2
    extractedPatch = extractedPatch[offset : extractedPatch.shape[0] - offset, offset : extractedPatch.shape[1] - offset,:]
    extractedResampledBigPatch = np.asarray(np.round(extractedPatch * 255.), np.uint8) #shape: (1536, 2048, 3)

    assert extractedResampledBigPatch.shape[0:2] == bigPatchResults.shape[1:3], "Segmentation result size does not match the overlapping RGB image patch"

    if savePredictionNumpy:
        np.save(resultspath + '/extractedResampledBigPatch.npy', extractedResampledBigPatch)
        np.save(resultspath + '/finalBigPatchPrediction.npy', finalBigPatchPrediction)


# save image and prediction in different modes:

saveImage(extractedResampledBigPatch, resultspath + '/Coord_'+str(patchCenterCoordinatesRaw[0])+'_'+str(patchCenterCoordinatesRaw[1])+'_OrigPatch.png', figSize)

# finalBigPatchPrediction[finalBigPatchPrediction == 7] = 0

savePredictionResultsWithoutDilation(finalBigPatchPrediction, resultspath + '/Coord_'+str(patchCenterCoordinatesRaw[0])+'_'+str(patchCenterCoordinatesRaw[1])+'_Prediction1_output.png', figSize)

# savePredictionOverlayResults(extractedResampledBigPatch, finalBigPatchPrediction, resultspath + '/Coord_'+str(patchCenterCoordinatesRaw[0])+'_'+str(patchCenterCoordinatesRaw[1])+'_Prediction3_anOverlay.png', figSize, alpha=0.4)

logger.info('########### POSTPROCESSING STARTS...###########')

finalBigPatchPrediction[finalBigPatchPrediction == 7] = 0


# ################# REMOVING TOO SMALL CONNECTED REGIONS ################
# Tuft
labeledTubuli, numberTubuli = label(np.asarray(finalBigPatchPrediction == 3, np.uint8), structure)  # datatype of 'labeledTubuli': int32
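
# A minimal sketch (not from the original file) of the "remove too small connected regions"
# step announced above, assuming `label` is scipy.ndimage.label; min_size is an
# illustrative threshold, not a value taken from the project:
import numpy as np
from scipy.ndimage import label

def remove_small_components(mask, structure, min_size):
    # label the connected regions of the binary mask, count their pixels,
    # and zero out every component smaller than min_size
    labeled, _ = label(np.asarray(mask, np.uint8), structure)
    sizes = np.bincount(labeled.ravel())
    small_ids = np.flatnonzero(sizes < min_size)
    small_ids = small_ids[small_ids != 0]   # never clear the background component (id 0)
    mask[np.isin(labeled, small_ids)] = 0
    return mask
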
Example n. 40
if __name__ == "__main__":
    timestart = time.perf_counter()
    image = utils.createImage(width, height)
    m = Model("obj/african_head.obj")
    timeend = time.perf_counter()
    print("Read model :: ", (timeend - timestart), "s")

    light_dir = np.array([0, 0, -1])

    timestart = time.perf_counter()
    for f in m.faces:
        v = np.array([map_to_screen(m.vertices[f[i][0]]) for i in range(3)])

        normal = np.cross(v[2] - v[0], v[1] - v[0])
        normal /= np.linalg.norm(normal)
        intensity = abs(np.dot(normal, light_dir))

        # triangle_line_sweep(v, image, utils.WHITE * intensity)
        triangle(v, image, shader=lambda bc: utils.WHITE * intensity)

    timeend = time.perf_counter()
    print("Render image :: ", (timeend - timestart), "s")

    timestart = time.perf_counter()
    utils.saveImage("out/4_mesh_light.jpg", image)
    timeend = time.perf_counter()
    print("Save to file :: ", (timeend - timestart), "s")

    window = MainWindow(width, height)
    window.showImage(image)
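
# map_to_screen is used above but not shown in this excerpt; a typical viewport transform
# for this kind of renderer (an assumption, not the project's actual helper) would map the
# model's [-1, 1] x/y coordinates onto pixel coordinates while keeping z for later use:
#
#   def map_to_screen(v):
#       return np.array([(v[0] + 1.0) * width / 2.0,
#                        (v[1] + 1.0) * height / 2.0,
#                        v[2]])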