Example #1
def peek(dat, folder, force=False):
    g = Globals()
    outf = g.default_repo_dir

    mkdir(outf + "peek")
    mkdir(outf + "peek/" + dat)

    print("\nPeeking " + folder + " for " + dat)
    dir = outf + "samples/" + dat + "/" + folder
    print("dir", dir)

    if (not force) and (os.path.exists(outf + 'peek/%s/%s.png' %
                                       (dat, folder.replace("/", "_")))):
        print("Already peeked before. Now exit.")
        return

    dataset = dset.ImageFolder(root=dir,
                               transform=transforms.Compose([
                                   transforms.ToTensor(),
                                   transforms.Normalize((0.5, 0.5, 0.5),
                                                        (0.5, 0.5, 0.5)),
                               ]))
    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=96,
                                             shuffle=True,
                                             num_workers=2)

    for i, data in enumerate(dataloader, 0):
        img, _ = data
        saveImage(img,
                  outf + 'peek/%s/%s.png' % (dat, folder.replace("/", "_")),
                  nrow=12)
        break
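For orientation, a hedged usage sketch (not from the original source): the dataset name and folder below are hypothetical, and peek is assumed to be importable as in Example #5.

from sampler.peek import peek

# Hypothetical call: previews samples/mnist/true under the default repo dir and
# writes a 12-column grid to peek/mnist/true.png (skipped if it already exists).
peek("mnist", "true")
peek("mnist", "true", force=True)  # regenerate even if a preview already exists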
Example #2
def __init__(self, str_trddate=STR_TODAY, download_winddata_mark=0):
    self.gl = Globals(str_trddate, download_winddata_mark)
    self.list_acctidsbymxz = self.gl.list_acctidsbymxz
    df_fmtted_wssdata_today = pd.DataFrame(
        self.gl.col_fmtted_wssdata.find({'DataDate': self.gl.str_today}, {'_id': 0})
    )
    self.dict_fmtted_wssdata_of_today = df_fmtted_wssdata_today.set_index('WindCode').to_dict()
Example #3
def task_at_0840():
    str_today = datetime.today().strftime('%Y%m%d')
    task1 = Globals(str_today, download_winddata_mark=1)
    task2 = PostTrdMng(str_today, download_winddata_mark=0)
    task2.run()
    task3 = PreTrdMng(str_today, download_winddata_mark=0)
    task3.run()
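Below is a hedged sketch (standard library only, not from the original source) of how task_at_0840 might be fired once a day at 08:40.

import time
from datetime import datetime

# Hypothetical scheduler loop: run task_at_0840 once per calendar day at 08:40.
last_run_date = None
while True:
    now = datetime.now()
    if now.strftime('%H%M') == '0840' and last_run_date != now.date():
        task_at_0840()
        last_run_date = now.date()
    time.sleep(30)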
Example #4
    def __init__(self):
        self.gl = Globals()
        self.dict_acctidbyxxq2acctidbymxz = {}
        for _ in self.gl.col_acctinfo.find({'DataDate': self.gl.str_today}):
            if _['StrategiesAllocationByAcct']:
                list_strategies = _['StrategiesAllocationByAcct'].split(';')
                if 'AutoT0_IndexFuture' in list_strategies or 'AutoT0_SecurityLoan' in list_strategies:
                    self.dict_acctidbyxxq2acctidbymxz.update(
                        {_['AcctIDByXuXiaoQiang4Trd']: _['AcctIDByMXZ']})
            else:
                continue

        # Upload the raw kdb order data
        self.fpath_order_from_kdb_102_116 = f'D:/projects/autot0/data/kdb_102_116/q_{self.gl.str_today}.csv'
        with open(self.fpath_order_from_kdb_102_116, encoding='utf-8') as f:
            list_dicts_order_from_kdb = []
            list_list_datalines = f.readlines()
            list_fields = list_list_datalines[0].strip().split(',')
            list_list_values = [
                _.strip().split(',') for _ in list_list_datalines[1:]
            ]
            for list_value in list_list_values:
                dict_order_from_kdb = dict(zip(list_fields, list_value))
                dict_order_from_kdb['DataDate'] = self.gl.str_today
                if dict_order_from_kdb[
                        'accountname'] not in self.dict_acctidbyxxq2acctidbymxz:
                    dict_order_from_kdb['AcctIDByMXZ'] = ''
                else:
                    dict_order_from_kdb['AcctIDByMXZ'] = (
                        self.dict_acctidbyxxq2acctidbymxz[
                            dict_order_from_kdb['accountname']])
                list_dicts_order_from_kdb.append(dict_order_from_kdb)
            self.gl.col_trade_rawdata_order_from_kdb_102_116.delete_many(
                {'DataDate': self.gl.str_today})
            if list_dicts_order_from_kdb:
                self.gl.col_trade_rawdata_order_from_kdb_102_116.insert_many(
                    list_dicts_order_from_kdb)
Example #5
from globals import Globals
from utils import mkdir, saveImage
import random
import torch
from torch.autograd import Variable
import torchvision.datasets as dset
import shutil
import torchvision.transforms as transforms
from sampler.peek import peek
import os
from tqdm import tqdm
from torch import nn
import torch.nn.functional as F
import torchvision.models as models

g = Globals()


class Ent():
    def __init__(self,
                 fraction,
                 data,
                 folder,
                 transform=None,
                 dup=1,
                 imageMode=0):
        self.fraction = fraction
        self.data = data
        self.folder = folder
        self.dup = dup
        self.imageMode = imageMode
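A short, hedged sketch of how Ent instances might be assembled; the fractions and folder names are invented for illustration only.

# Hypothetical usage: each Ent ties a sampling fraction to a dataset folder;
# transform, dup and imageMode fall back to their defaults.
mix = [
    Ent(0.7, "celeba", "true"),
    Ent(0.3, "celeba", "dcgan_fake", dup=2),
]
print(sum(e.fraction for e in mix))  # presumably the fractions describe a mixture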
Example #6
from globals import Globals
globals = Globals(__file__,
                  successStatus=True,
                  settingStatus=True,
                  debugStatus=True,
                  warningStatus=True,
                  failureStatus=True,
                  errorStatus=True)
import PythonFrameworkFlask
app = PythonFrameworkFlask.app
api = PythonFrameworkFlask.api

if __name__ == '__main__':
    app.run(debug=True)
Example #7
def DCGAN_cluster_main(opt):
    g = Globals()

    N_CLUSTER = 200
    cluster = np.load('/scratch/ys646/gan/features/celeba/clus.npy')

    opt.batchSize = 64
    opt.imageSize = 64
    opt.nz = 100
    opt.ngf = 64
    opt.ndf = 64
    opt.niter = 50
    opt.lr = 0.0002
    opt.beta1 = 0.5
    opt.cuda = True
    opt.ngpu = 1
    opt.netG = ''
    opt.netD = ''
    opt.outf = g.default_model_dir + "DCGAN/"
    opt.manualSeed = None

    opt = addDataInfo(opt)
    opt.outf = '/scratch/ys646/gan/results/'
    opt.outf = opt.outf + opt.data + "/"
    print_prop(opt)

    try:
        os.makedirs(opt.outf)
    except OSError:
        pass
    if os.path.exists(opt.outf + "/mark"):
        print("Already generated before. Now exit.")
        return

    if opt.manualSeed is None:
        opt.manualSeed = random.randint(1, 10000)
    print("Random Seed: ", opt.manualSeed)
    random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)
    if opt.cuda:
        torch.cuda.manual_seed_all(opt.manualSeed)

    cudnn.benchmark = True

    if torch.cuda.is_available() and not opt.cuda:
        print(
            "WARNING: You have a CUDA device, so you should probably run with --cuda"
        )

    # option 1: just don't shuffle, use a counter for indices
    # this is important to keep orders fixed
    opt.workers = 1
    dataset, dataloader = getDataSet(opt, needShuf=False)
    # option 2: shuffle but use a modified dataloader that also output indices

    ngpu = int(opt.ngpu)
    nz = int(opt.nz)
    ngf = int(opt.ngf)
    ndf = int(opt.ndf)
    nc = 1 if opt.data.startswith("mnist") else 3

    # custom weights initialization called on netG and netD
    def weights_init(m):
        classname = m.__class__.__name__
        if classname.find('Conv') != -1:
            m.weight.data.normal_(0.0, 0.02)
        elif classname.find('BatchNorm') != -1:
            m.weight.data.normal_(1.0, 0.02)
            m.bias.data.fill_(0)

    netG = DCGAN_G(nz + N_CLUSTER, nc, ngf)
    netG.apply(weights_init)
    if opt.netG != '':
        netG.load_state_dict(torch.load(opt.netG))
    print(netG)

    class _netD(nn.Module):
        def __init__(self, ngpu):
            super(_netD, self).__init__()
            self.ngpu = ngpu
            self.main = nn.Sequential(
                # input is (nc) x 64 x 64
                # nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
                # extra channel for input embedding
                nn.Conv2d(nc + 1, ndf, 4, 2, 1, bias=False),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf) x 32 x 32
                nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
                nn.BatchNorm2d(ndf * 2),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf*2) x 16 x 16
                nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
                nn.BatchNorm2d(ndf * 4),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf*4) x 8 x 8
                nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
                nn.BatchNorm2d(ndf * 8),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf*8) x 4 x 4
                nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),
                nn.Sigmoid())
            self.emb = nn.Embedding(N_CLUSTER, opt.imageSize * opt.imageSize)

        def forward(self, input, clus_var):
            out_emb = self.emb.forward(clus_var)
            out_emb = out_emb.view(-1, 1, 64, 64)
            new_input = torch.cat([out_emb, input], 1)
            output = self.main.forward(new_input)
            return output.view(-1, 1)

    netD = _netD(ngpu)
    netD.apply(weights_init)
    if opt.netD != '':
        netD.load_state_dict(torch.load(opt.netD))
    print(netD)

    criterion = nn.BCELoss()

    input = torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize)
    noise = torch.FloatTensor(opt.batchSize, nz, 1, 1)
    fixed_noise = torch.FloatTensor(opt.batchSize, nz, 1, 1).normal_(0, 1)
    label = torch.FloatTensor(opt.batchSize)
    real_label = 1
    fake_label = 0

    if opt.cuda:
        netD.cuda()
        netG.cuda()
        criterion.cuda()
        input, label = input.cuda(), label.cuda()
        noise, fixed_noise = noise.cuda(), fixed_noise.cuda()

    input = Variable(input)
    label = Variable(label)
    noise = Variable(noise)
    fixed_noise = Variable(fixed_noise)

    # setup optimizer
    optimizerD = optim.Adam(netD.parameters(),
                            lr=opt.lr,
                            betas=(opt.beta1, 0.999))
    optimizerG = optim.Adam(netG.parameters(),
                            lr=opt.lr,
                            betas=(opt.beta1, 0.999))

    for epoch in range(opt.niter):
        counter = 0
        for i, data in enumerate(dataloader, 0):
            ############################
            # (0) Get the corresponding clusters
            ###########################
            batch_size = data[1].size(0)
            clus_batch = cluster[counter:counter + batch_size]
            clus_batch = torch.from_numpy(clus_batch).long()
            counter = counter + batch_size

            clus_var = Variable(clus_batch.cuda(), requires_grad=False)
            oh = torch.FloatTensor(batch_size, N_CLUSTER)
            oh.zero_()
            oh.scatter_(1, clus_batch.view(-1, 1), 1)
            oh_var = Variable(oh.cuda(), requires_grad=False)

            ############################
            # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
            ###########################
            # train with real
            netD.zero_grad()
            real_cpu, _ = data
            input.data.resize_(real_cpu.size()).copy_(real_cpu)
            label.data.resize_(batch_size).fill_(real_label)

            output = netD(input, clus_var)
            errD_real = criterion(output, label)
            errD_real.backward()
            D_x = output.data.mean()

            # train with fake
            noise.data.resize_(batch_size, nz, 1, 1)
            noise.data.normal_(0, 1)
            # pad noise with one hot
            fake = netG(torch.cat([noise, oh_var], 1))

            label.data.fill_(fake_label)
            output = netD(fake.detach(), clus_var)
            errD_fake = criterion(output, label)
            errD_fake.backward()
            D_G_z1 = output.data.mean()
            errD = errD_real + errD_fake
            optimizerD.step()

            ############################
            # (2) Update G network: maximize log(D(G(z)))
            ###########################
            netG.zero_grad()
            # fake labels are real for generator cost
            label.data.fill_(real_label)
            output = netD(fake, clus_var)
            errG = criterion(output, label)
            errG.backward()
            D_G_z2 = output.data.mean()
            optimizerG.step()

            print(
                '[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)): %.4f / %.4f'
                % (epoch, opt.niter, i, len(dataloader), errD.data[0],
                   errG.data[0], D_x, D_G_z1, D_G_z2))
            if i % 100 == 0:
                saveImage(real_cpu, '%s/real_samples.png' % opt.outf)
                fake = netG(torch.cat([fixed_noise, oh_var], 1))
                saveImage(fake.data,
                          '%s/fake_samples_epoch_%03d.png' % (opt.outf, epoch))

        # do checkpointing
        torch.save(netG.state_dict(),
                   '%s/netG_epoch_%d.pth' % (opt.outf, epoch))
        torch.save(netD.state_dict(),
                   '%s/netD_epoch_%d.pth' % (opt.outf, epoch))

    with open(opt.outf + "/mark", "w") as f:
        f.write("")
Example #8
def MGGAN_main(opt):

    g = Globals()

    opt.workers = 2
    opt.batchSize = 64
    opt.imageSize = 64
    nc = 1 if opt.data.startswith("mnist") else 3
    opt.nz = 100
    opt.ngf = 64
    opt.ndf = 64
    opt.niter = 30
    opt.lr = 0.0002
    opt.beta1 = 0.5
    opt.cuda = True
    opt.ngpu = 1
    opt.netG = ''
    opt.netD = ''
    opt.outf = g.default_model_dir + "MGGAN/"
    opt.manualSeed = None

    opt = addDataInfo(opt)
    opt.outf = opt.outf + opt.data + "/"
    print_prop(opt)

    try:
        os.makedirs(opt.outf)
    except OSError:
        pass
    if os.path.exists(opt.outf + "/mark"):
        print("Already generated before. Now exit.")
        return

    if opt.manualSeed is None:
        opt.manualSeed = random.randint(1, 10000)
    print("Random Seed: ", opt.manualSeed)
    random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)
    if opt.cuda:
        torch.cuda.manual_seed_all(opt.manualSeed)

    cudnn.benchmark = True

    dataset, dataloader = getDataSet(opt)

    ngpu = int(opt.ngpu)
    nz = int(opt.nz)
    ngf = int(opt.ngf)
    ndf = int(opt.ndf)

    # custom weights initialization called on netG and netD
    def weights_init(m):
        classname = m.__class__.__name__
        if classname.find('Conv') != -1:
            m.weight.data.normal_(0.0, 0.02)
        elif classname.find('BatchNorm') != -1:
            m.weight.data.normal_(1.0, 0.02)
            m.bias.data.fill_(0)

    netG = DCGAN_G(nz, nc, ngf)
    netG.apply(weights_init)
    if opt.netG != '':
        netG.load_state_dict(torch.load(opt.netG))
    print(netG)

    nloss = 200

    class _netD(nn.Module):

        def __init__(self, ngpu):
            super(_netD, self).__init__()
            self.ngpu = ngpu
            self.main = nn.Sequential(
                # input is (nc) x 64 x 64
                nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf) x 32 x 32
                nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
                nn.BatchNorm2d(ndf * 2),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf*2) x 16 x 16
                nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
                nn.BatchNorm2d(ndf * 4),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf*4) x 8 x 8
                nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False)
            )
            self.main2 = nn.Sequential(
                # state size. (ndf*8) x 4 x 4
                nn.BatchNorm2d(ndf * 8),
                nn.LeakyReLU(0.2, inplace=True),
                nn.Conv2d(ndf * 8, nloss, 4, 1, 0, bias=False),
                # nn.Linear(ndf*8*4*4,nloss),
                nn.Sigmoid()
            )

        def forward(self, input):
            self.feature = self.main.forward(input)
            # output=self.main2.forward(self.feature.view(input.size(0),-1))
            output = self.main2.forward(self.feature)
            return output.view(-1, 1)

    netD = _netD(ngpu)
    netD.apply(weights_init)
    if opt.netD != '':
        netD.load_state_dict(torch.load(opt.netD))
    print(netD)

    criterion = nn.BCELoss()

    input = torch.FloatTensor(opt.batchSize, nc, opt.imageSize, opt.imageSize)
    noise = torch.FloatTensor(opt.batchSize, nz, 1, 1)
    fixed_noise = torch.FloatTensor(opt.batchSize, nz, 1, 1).normal_(0, 1)
    label = torch.FloatTensor(opt.batchSize, nloss)
    real_label = 1
    fake_label = 0

    if opt.cuda:
        netD.cuda()
        netG.cuda()
        criterion.cuda()
        input, label = input.cuda(), label.cuda()
        noise, fixed_noise = noise.cuda(), fixed_noise.cuda()

    input = Variable(input)
    label = Variable(label)
    noise = Variable(noise)
    fixed_noise = Variable(fixed_noise)

    # setup optimizer
    optimizerD = optim.Adam(netD.parameters(), lr=opt.lr,
                            betas=(opt.beta1, 0.999))
    optimizerG = optim.Adam(netG.parameters(), lr=opt.lr,
                            betas=(opt.beta1, 0.999))

    real_batch = 11
    grow_speed = 5
    for epoch in range(opt.niter):
        if epoch % grow_speed == 0:
            if real_batch > 1:
                real_batch -= 1

            real_inputs = torch.FloatTensor(
                real_batch * opt.batchSize, nc, opt.imageSize, opt.imageSize)
        pointer = 0
        for i, data in enumerate(dataloader, 0):
            real_cpu, _ = data
            batch_size = real_cpu.size(0)
            if batch_size < opt.batchSize:
                continue
            pointer = pointer % real_batch + 1

            if pointer < real_batch:  # still need to fill the batch
                # copy data
                real_inputs[
                    pointer * batch_size:(pointer + 1) * batch_size].copy_(real_cpu)
                continue
            # Done collecting! Now we can collect all the feature vectors..
            input.data.resize_(real_inputs.size()).copy_(real_inputs)
            netD(input)
            true_features = netD.feature.view(
                real_inputs.size(0), -1)  # make feature a vector

            noise.data.resize_(batch_size, nz, 1, 1)
            noise.data.normal_(0, 1)
            fake = netG(noise)
            label.data.fill_(fake_label)
            output = netD(fake.detach())
            fake_features = netD.feature.view(batch_size, -1)

            # Now we need to match each fake feature to a true feature; run it
            # as an LP (assignment) program...
            map = solve(fake_features.data, true_features.data)
            input.data.resize_(real_cpu.size())
            for j in range(0, batch_size):
                input.data[j].copy_(real_inputs[map[j]])

            tot_mini_batch = 10
            for mini_batch in range(0, tot_mini_batch):
                label.data.fill_(real_label)
                netD.zero_grad()
                output = netD(input)
                errD_real = criterion(output, label)
                errD_real.backward()
                D_x = output.data.mean()

                fake = netG(noise)
                label.data.fill_(fake_label)
                output = netD(fake.detach())
                errD_fake = criterion(output, label)
                errD_fake.backward()
                D_G_z1 = output.data.mean()

                errD = errD_real + errD_fake
                optimizerD.step()

                ############################
                # (2) Update G network: maximize log(D(G(z)))
                ###########################
                netG.zero_grad()
                # fake labels are real for generator cost
                label.data.fill_(real_label)
                output = netD(fake)
                errG = criterion(output, label)
                errG.backward()
                D_G_z2 = output.data.mean()
                optimizerG.step()

                print('[%d/%d][%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)): %.4f / %.4f'
                      % (epoch, opt.niter, i, len(dataloader), mini_batch, tot_mini_batch,
                         errD.data[0], errG.data[0], D_x, D_G_z1, D_G_z2))
            saveImage(real_cpu, '%s/real_samples.png' % opt.outf)
            fake = netG(fixed_noise)
            saveImage(fake.data, '%s/fake_samples_epoch_%03d.png' %
                      (opt.outf, epoch))

        # do checkpointing
        torch.save(netG.state_dict(), '%s/netG_epoch_%d.pth' %
                   (opt.outf, epoch))
        torch.save(netD.state_dict(), '%s/netD_epoch_%d.pth' %
                   (opt.outf, epoch))
    with open(opt.outf + "/mark", "w") as f:
        f.write("")
Example #9
def NNGAN_main(opt):
    g = Globals()

    opt.workers = 2
    opt.batchSize = 64
    opt.imageSize = 64
    opt.nz = 100
    opt.ngf = 64
    opt.ndf = 64
    opt.niter = 50
    opt.lr = 0.0002
    opt.beta1 = 0.5
    opt.cuda = True
    opt.ngpu = 1
    opt.netG = ''
    opt.netF = ''
    opt.netC = ''
    opt.outf = g.default_model_dir + "NNGAN/"
    opt.manualSeed = None

    opt = addDataInfo(opt)
    opt.outf = opt.outf + opt.data + "/"
    print_prop(opt)

    try:
        os.makedirs(opt.outf)
    except OSError:
        pass

    if opt.manualSeed is None:
        opt.manualSeed = random.randint(1, 10000)
    print("Random Seed: ", opt.manualSeed)
    random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)
    if opt.cuda:
        torch.cuda.manual_seed_all(opt.manualSeed)

    if os.path.exists(opt.outf + "/mark"):
        print("Already generated before. Now exit.")
        return

    cudnn.benchmark = True

    dataset, dataloader = getDataSet(opt, needShuf=False)

    nz = int(opt.nz)
    ngf = int(opt.ngf)
    ndf = int(opt.ndf)
    nc = 1 if opt.data.startswith("mnist") else 3

    # custom weights initialization called on netG and netD
    def weights_init(m):
        classname = m.__class__.__name__
        if classname.find('Conv') != -1:
            m.weight.data.normal_(0.0, 0.02)
        elif classname.find('BatchNorm') != -1:
            m.weight.data.normal_(1.0, 0.02)
            m.bias.data.fill_(0)

    netG = DCGAN_G(100, nc, 64)
    netG.apply(weights_init)
    if opt.netG != '':
        netG.load_state_dict(torch.load(opt.netG))
        print("Load netg")
    print(netG)

    class _netFeature(nn.Module):

        def __init__(self):
            super(_netFeature, self).__init__()
            self.main = nn.Sequential(
                # input is (nc) x 64 x 64
                nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf) x 32 x 32
                nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
                nn.BatchNorm2d(ndf * 2),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf*2) x 16 x 16
                nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
                nn.BatchNorm2d(ndf * 4),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf*4) x 8 x 8
                nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
                nn.BatchNorm2d(ndf * 8),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf*8) x 4 x 4
            )

        def forward(self, input):
            output = self.main.forward(input).view(input.size(0), -1)
            # outputN=torch.norm(output,2,1)
            # return output/(outputN.expand_as(output))
            return output

    class _netCv(nn.Module):

        def __init__(self):
            super(_netCv, self).__init__()
            self.main = nn.Sequential(
                nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),
                nn.Sigmoid()
            )

        def forward(self, input):
            return self.main(input.view(input.size(0), 512, 4, 4)).view(-1, 1)

    netF = _netFeature()
    netF.apply(weights_init)
    print(netF)
    netC = _netCv()
    netC.apply(weights_init)
    print(netC)
    if opt.netF != '':
        netF.load_state_dict(torch.load(opt.netF))
        print("Load netf")
    if opt.netC != '':
        netC.load_state_dict(torch.load(opt.netC))
        print("Load netc")

    criterion = nn.BCELoss()

    core_batch = 64
    input = torch.FloatTensor(opt.batchSize, nc, opt.imageSize, opt.imageSize)
    noise = torch.FloatTensor(opt.batchSize, nz, 1, 1)
    fixed_noise = torch.FloatTensor(64, nz, 1, 1).normal_(0, 1)
    label = torch.FloatTensor(core_batch)
    real_label = 1
    fake_label = 0

    if opt.cuda:
        netF.cuda()
        netC.cuda()
        netG.cuda()
        criterion.cuda()
        input, label = input.cuda(), label.cuda()
        noise, fixed_noise = noise.cuda(), fixed_noise.cuda()

    input = Variable(input)
    label = Variable(label)
    noise = Variable(noise)
    fixed_noise = Variable(fixed_noise)

    # setup optimizer
    optimizerF = optim.Adam(netF.parameters(), lr=opt.lr,
                            betas=(opt.beta1, 0.999))
    optimizerC = optim.Adam(netC.parameters(), lr=opt.lr,
                            betas=(opt.beta1, 0.999))
    optimizerG = optim.Adam(netG.parameters(), lr=opt.lr,
                            betas=(opt.beta1, 0.999))

    core_input = Variable(torch.FloatTensor(
        core_batch, nc, opt.imageSize, opt.imageSize).cuda())

    for epoch in range(opt.niter):

        for i, data in enumerate(dataloader, 0):
            ############################
            # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
            ###########################
            netF.zero_grad()
            netC.zero_grad()

            noise.data.resize_(core_batch, nz, 1, 1)
            noise.data.normal_(0, 1)
            fake = netG(noise)
            label.data.resize_(core_batch).fill_(fake_label)
            fake_features = netF(fake.detach())
            output = netC(fake_features)
            errD_fake = criterion(output, label)
            errD_fake.backward()
            D_G_z1 = output.data.mean()

            real_cpu, _ = data
            # We only do full mini-batches, ignore the last mini-batch
            if (real_cpu.size(0) < opt.batchSize):
                print("Skip small mini batch!")
                continue
            input.data.resize_(real_cpu.size()).copy_(real_cpu)
            true_features = netF(input)
            M = distance(fake_features.data.view(fake_features.size(
                0), -1), true_features.data.view(real_cpu.size(0), -1), False)
            # get the specific neighbors of features in F_true
            _, fake_true_neighbors = torch.min(M, 1)
            unique_nn = np.unique(fake_true_neighbors.numpy()).size
            core_input.data.copy_(torch.index_select(
                real_cpu, 0, fake_true_neighbors.view(-1)))

            true_features = netF(core_input)
            output = netC(true_features)
            label.data.resize_(core_batch).fill_(real_label)
            errD_real = criterion(output, label)
            errD_real.backward()
            D_x = output.data.mean()

            errD = errD_real + errD_fake
            optimizerF.step()
            optimizerC.step()

            ############################
            # (2) Update G network: DCGAN
            ###########################

            netG.zero_grad()

            # fake labels are real for generator cost
            label.data.fill_(real_label)
            fake_features = netF(fake)
            output = netC(fake_features)
            errG = criterion(output, label)
            errG.backward()
            D_G_z2 = output.data.mean()
            optimizerG.step()

            print('[%d/%d][%d/%d] Loss_D: %.4f D(x): %.4f D(G(z)): %.4f, %.4f unique=%d'
                  % (epoch, opt.niter, i, len(dataloader),
                     errD.data[0], D_x, D_G_z1, D_G_z2, unique_nn))

            if i % 50 == 0:
                saveImage(real_cpu[0:64], '%s/real_samples.png' % opt.outf)
                fake = netG(fixed_noise)
                saveImage(fake.data, '%s/fake_samples_epoch_%03d.png' %
                          (opt.outf, epoch))

        # do checkpointing
        torch.save(netG.state_dict(), '%s/netG_epoch_%d.pth' %
                   (opt.outf, epoch))
        torch.save(netF.state_dict(), '%s/netF_epoch_%d.pth' %
                   (opt.outf, epoch))
        torch.save(netC.state_dict(), '%s/netC_epoch_%d.pth' %
                   (opt.outf, epoch))
    with open(opt.outf + "/mark", "w") as f:
        f.write("")
Example #10
def WGAN_main(opt):
    g = Globals()

    opt.workers = 2
    opt.batchSize = 64
    opt.imageSize = 64
    opt.nz = 100
    opt.ngf = 64
    opt.ndf = 64
    opt.niter = 50
    opt.lrD = 0.00005
    opt.lrG = 0.00005
    opt.beta1 = 0.5
    opt.cuda = True
    opt.ngpu = 1
    opt.netG = ''
    opt.netD = ''
    opt.clamp_lower = -0.01
    opt.clamp_upper = 0.01
    opt.Diters = 5
    opt.n_extra_layers = 0
    opt.outf = g.default_model_dir + "WGAN/"
    opt.adam = False

    opt = addDataInfo(opt)
    opt.outf = opt.outf + opt.data + "/"
    print_prop(opt)

    if os.path.exists(opt.outf + "/mark"):
        print("Already generated before. Now exit.")
        return

    if opt.outf is None:
        opt.outf = 'samples'
    os.system('mkdir {0}'.format(opt.outf))

    opt.manualSeed = random.randint(1, 10000)  # fix seed
    print("Random Seed: ", opt.manualSeed)
    random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)

    cudnn.benchmark = True

    if torch.cuda.is_available() and not opt.cuda:
        print("WARNING: You have a CUDA device, so you should probably run with --cuda")

    dataset, dataloader = getDataSet(opt)

    ngpu = int(opt.ngpu)
    nz = int(opt.nz)
    ngf = int(opt.ngf)
    ndf = int(opt.ndf)
    nc = 1 if opt.data.startswith("mnist") else 3
    n_extra_layers = int(opt.n_extra_layers)

    # custom weights initialization called on netG and netD
    def weights_init(m):
        classname = m.__class__.__name__
        if classname.find('Conv') != -1:
            m.weight.data.normal_(0.0, 0.02)
        elif classname.find('BatchNorm') != -1:
            m.weight.data.normal_(1.0, 0.02)
            m.bias.data.fill_(0)

    netG = WGAN_G(opt.imageSize, nz, nc, ngf, ngpu, n_extra_layers)

    netG.apply(weights_init)
    if opt.netG != '':  # load checkpoint if needed
        netG.load_state_dict(torch.load(opt.netG))
    print(netG)

    netD = WGAN_D(opt.imageSize, nz, nc, ndf, ngpu, n_extra_layers)
    netD.apply(weights_init)

    if opt.netD != '':
        netD.load_state_dict(torch.load(opt.netD))
    print(netD)

    input = torch.FloatTensor(opt.batchSize, nc, opt.imageSize, opt.imageSize)
    noise = torch.FloatTensor(opt.batchSize, nz, 1, 1)
    fixed_noise = torch.FloatTensor(opt.batchSize, nz, 1, 1).normal_(0, 1)
    one = torch.FloatTensor([1])
    mone = one * -1

    if opt.cuda:
        netD.cuda()
        netG.cuda()
        input = input.cuda()
        one, mone = one.cuda(), mone.cuda()
        noise, fixed_noise = noise.cuda(), fixed_noise.cuda()

    # setup optimizer
    if opt.adam:
        optimizerD = optim.Adam(
            netD.parameters(), lr=opt.lrD, betas=(opt.beta1, 0.999))
        optimizerG = optim.Adam(
            netG.parameters(), lr=opt.lrG, betas=(opt.beta1, 0.999))
    else:
        optimizerD = optim.RMSprop(netD.parameters(), lr=opt.lrD)
        optimizerG = optim.RMSprop(netG.parameters(), lr=opt.lrG)

    gen_iterations = 0
    for epoch in range(opt.niter):
        data_iter = iter(dataloader)
        i = 0
        while i < len(dataloader):
            ############################
            # (1) Update D network
            ###########################
            for p in netD.parameters():  # reset requires_grad
                p.requires_grad = True  # they are set to False below in netG update

            # train the discriminator Diters times
            if gen_iterations < 25 or gen_iterations % 500 == 0:
                Diters = 100
            else:
                Diters = opt.Diters
            j = 0
            while j < Diters and i < len(dataloader):
                j += 1

                # clamp parameters to a cube
                for p in netD.parameters():
                    p.data.clamp_(opt.clamp_lower, opt.clamp_upper)

                data = next(data_iter)
                i += 1

                # train with real
                real_cpu, _ = data
                netD.zero_grad()
                batch_size = real_cpu.size(0)

                if opt.cuda:
                    real_cpu = real_cpu.cuda()
                input.resize_as_(real_cpu).copy_(real_cpu)
                inputv = Variable(input)

                errD_real = netD(inputv)
                errD_real.backward(one)

                # train with fake
                noise.resize_(opt.batchSize, nz, 1, 1).normal_(0, 1)
                noisev = Variable(noise, volatile=True)  # totally freeze netG
                fake = Variable(netG(noisev).data)
                inputv = fake
                errD_fake = netD(inputv)
                errD_fake.backward(mone)
                errD = errD_real - errD_fake
                optimizerD.step()

            ############################
            # (2) Update G network
            ###########################
            for p in netD.parameters():
                p.requires_grad = False  # to avoid computation
            netG.zero_grad()
            # in case our last batch was the tail batch of the dataloader,
            # make sure we feed a full batch of noise
            noise.resize_(opt.batchSize, nz, 1, 1).normal_(0, 1)
            noisev = Variable(noise)
            fake = netG(noisev)
            errG = netD(fake)
            errG.backward(one)
            optimizerG.step()
            gen_iterations += 1

            print('[%d/%d][%d/%d][%d] Loss_D: %f Loss_G: %f Loss_D_real: %f Loss_D_fake %f'
                  % (epoch, opt.niter, i, len(dataloader), gen_iterations,
                     errD.data[0], errG.data[0], errD_real.data[0], errD_fake.data[0]))
            if gen_iterations % 50 == 0:
                saveImage(real_cpu, '{0}/real_samples.png'.format(opt.outf))
                fake = netG(Variable(fixed_noise, volatile=True))
                saveImage(
                    fake.data, '{0}/fake_samples_{1}.png'.format(opt.outf, gen_iterations))

        # do checkpointing
        torch.save(netG.state_dict(),
                   '{0}/netG_epoch_{1}.pth'.format(opt.outf, epoch))
        torch.save(netD.state_dict(),
                   '{0}/netD_epoch_{1}.pth'.format(opt.outf, epoch))
    with open(opt.outf + "/mark", "w") as f:
        f.write("")
Example #11
from flask import Flask
from globals import Config, Globals

app = Flask(__name__)
app.config.from_object(Config)

myGlobal = Globals()
myGlobal.app = app

import prog.routes


@app.route('/', methods=['GET', 'POST'])
@app.route('/main_menu', methods=['GET', 'POST'])
def main_menu():
    return prog.routes.main_menu(myGlobal)


@app.route('/create', methods=['GET', 'POST'])
def create():
    return prog.routes.create()


@app.route('/load', methods=['GET', 'POST'])
def load():
    return prog.routes.load()


@app.route('/getHnum', methods=['GET', 'POST'])
def solve():
    return prog.routes.solve()
Example #12
def DCGAN_main(opt):
    g = Globals()

    opt.workers = 2
    opt.batchSize = 128
    opt.imageSize = 64
    opt.nz = 100
    opt.ngf = 64
    opt.ndf = 64
    opt.niter = 50
    opt.lr = 0.0002
    opt.beta1 = 0.5
    opt.cuda = True
    opt.ngpu = 1
    opt.netG = ''
    opt.netD = ''
    opt.outf = g.default_model_dir + "DCGAN/"
    opt.manualSeed = None

    opt = addDataInfo(opt)
    opt.outf = opt.outf + opt.data + "/"
    print_prop(opt)

    try:
        os.makedirs(opt.outf)
    except OSError:
        pass
    if os.path.exists(opt.outf + "/mark"):
        print("Already generated before. Now exit.")
        return

    if opt.manualSeed is None:
        opt.manualSeed = random.randint(1, 10000)
    print("Random Seed: ", opt.manualSeed)
    random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)
    if opt.cuda:
        torch.cuda.manual_seed_all(opt.manualSeed)

    cudnn.benchmark = True

    if torch.cuda.is_available() and not opt.cuda:
        print("WARNING: You have a CUDA device, so you should probably run with --cuda")

    dataset, dataloader = getDataSet(opt)

    ngpu = int(opt.ngpu)
    nz = int(opt.nz)
    ngf = int(opt.ngf)
    ndf = int(opt.ndf)
    nc = 1 if opt.data.startswith("mnist") else 3

    # custom weights initialization called on netG and netD
    def weights_init(m):
        classname = m.__class__.__name__
        if classname.find('Conv') != -1:
            m.weight.data.normal_(0.0, 0.02)
        elif classname.find('BatchNorm') != -1:
            m.weight.data.normal_(1.0, 0.02)
            m.bias.data.fill_(0)

    netG = DCGAN_G(nz, nc, ngf)
    netG.apply(weights_init)
    if opt.netG != '':
        netG.load_state_dict(torch.load(opt.netG))
    print(netG)

    netD = DCGAN_D(ngpu)
    netD.apply(weights_init)
    if opt.netD != '':
        netD.load_state_dict(torch.load(opt.netD))
    print(netD)

    criterion = nn.BCELoss()

    input = torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize)
    noise = torch.FloatTensor(opt.batchSize, nz, 1, 1)
    fixed_noise = torch.FloatTensor(opt.batchSize, nz, 1, 1).normal_(0, 1)
    label = torch.FloatTensor(opt.batchSize)
    real_label = 1
    fake_label = 0

    if opt.cuda:
        netD.cuda()
        netG.cuda()
        criterion.cuda()
        input, label = input.cuda(), label.cuda()
        noise, fixed_noise = noise.cuda(), fixed_noise.cuda()

    input = Variable(input)
    label = Variable(label)
    noise = Variable(noise)
    fixed_noise = Variable(fixed_noise)

    # setup optimizer
    optimizerD = optim.Adam(netD.parameters(), lr=opt.lr,
                            betas=(opt.beta1, 0.999))
    optimizerG = optim.Adam(netG.parameters(), lr=opt.lr,
                            betas=(opt.beta1, 0.999))

    for epoch in range(opt.niter):
        for i, data in enumerate(dataloader, 0):
            ############################
            # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
            ###########################
            # train with real
            netD.zero_grad()
            real_cpu, _ = data
            batch_size = real_cpu.size(0)
            input.data.resize_(real_cpu.size()).copy_(real_cpu)
            label.data.resize_(batch_size).fill_(real_label)

            output = netD(input)
            errD_real = criterion(output, label)
            errD_real.backward()
            D_x = output.data.mean()

            # train with fake
            noise.data.resize_(batch_size, nz, 1, 1)
            noise.data.normal_(0, 1)
            fake = netG(noise)
            label.data.fill_(fake_label)
            output = netD(fake.detach())
            errD_fake = criterion(output, label)
            errD_fake.backward()
            D_G_z1 = output.data.mean()
            errD = errD_real + errD_fake
            optimizerD.step()

            ############################
            # (2) Update G network: maximize log(D(G(z)))
            ###########################
            netG.zero_grad()
            # fake labels are real for generator cost
            label.data.fill_(real_label)
            output = netD(fake)
            errG = criterion(output, label)
            errG.backward()
            D_G_z2 = output.data.mean()
            optimizerG.step()

            print('[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)): %.4f / %.4f'
                  % (epoch, opt.niter, i, len(dataloader),
                     errD.data[0], errG.data[0], D_x, D_G_z1, D_G_z2))
            if i % 100 == 0:
                saveImage(real_cpu, '%s/real_samples.png' % opt.outf)
                fake = netG(fixed_noise)
                saveImage(fake.data, '%s/fake_samples_epoch_%03d.png' %
                          (opt.outf, epoch))

        # do checkpointing
        torch.save(netG.state_dict(), '%s/netG_epoch_%d.pth' %
                   (opt.outf, epoch))
        torch.save(netD.state_dict(), '%s/netD_epoch_%d.pth' %
                   (opt.outf, epoch))
    with open(opt.outf + "/mark", "w") as f:
        f.write("")
Example #13
'''
@author: root
'''

import os, time, redis, pickle, face_recognition

from flask import Flask, request
from werkzeug.utils import secure_filename

from globals import Globals

app = Flask(__name__)
con = redis.StrictRedis(connection_pool=redis.ConnectionPool(
    decode_responses=True))

globalsCur = Globals(__file__, 1)
imgEncKey = globalsCur.key("imgEnc")


@app.route("/")
def root():
    return ""


# http://192.168.0.78:5000/face/upload
@app.route("/face/upload", methods=["GET", "POST"])
def upload():
    if request.method == "GET":
        return """
        <form method="POST" enctype="multipart/form-data" >
            <input type="file" name="file" /><br /><br />
Example #14
def saveFeature(imgFolder, opt, model='resnet34', workers=4, batch_size=64):
    '''
        model: inception_v3, vgg13, vgg16, vgg19, resnet18, resnet34,
               resnet50, resnet101, or resnet152
    '''
    g = Globals()

    mkdir(g.default_feature_dir + opt.data)
    feature_dir = g.default_feature_dir + opt.data + "/" + lastFolder(imgFolder)
    
    conv_path = '{}_{}_conv.pth'.format(feature_dir, model)
    class_path = '{}_{}_class.pth'.format(feature_dir, model)
    smax_path = '{}_{}_smax.pth'.format(feature_dir, model)

    if (os.path.exists(conv_path) and os.path.exists(class_path)
            and os.path.exists(smax_path)):
        print("Feature already generated before. Now pass.")
        return

    if hasattr(opt, 'feat_model') and opt.feat_model is not None:
        model = opt.feat_model
    if model == 'vgg' or model == 'vgg16':
        vgg = models.vgg16(pretrained=True).cuda().eval()

        trans = transforms.Compose([
            transforms.Scale(224),
            transforms.ToTensor(),
            transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
        ])

        dataset = dset.ImageFolder(root=imgFolder, transform=trans)
        dataloader = torch.utils.data.DataLoader(
            dataset, batch_size=batch_size, num_workers=int(workers),
            shuffle=False)

        print('saving vgg features:')
        feature_conv, feature_smax, feature_class = [], [], []
        for img, _ in tqdm(dataloader):
            input = Variable(img.cuda(), volatile=True)
            fconv = vgg.features(input)
            fconv_out = fconv.mean(3).mean(2).squeeze()
            fconv = fconv.view(fconv.size(0), -1)
            flogit = vgg.classifier(fconv)
            fsmax = F.softmax(flogit)
            feature_conv.append(fconv_out.data.cpu())
            feature_class.append(flogit.data.cpu())
            feature_smax.append(fsmax.data.cpu())
        feature_conv = torch.cat(feature_conv, 0)
        feature_class = torch.cat(feature_class, 0)
        feature_smax = torch.cat(feature_smax, 0)

    elif model.find('resnet') >= 0:
        if model == 'resnet34_cifar':
            # Please load your own model. Example here:
            # c = torch.load(
            #     '/home/gh349/xqt/wide-resnet.pytorch/checkpoint/cifar10/gan-resnet-34.t7')
            # resnet = c['net']
            pass
            print('Using resnet34 trained on cifar10.')
            raise NotImplementedError()

        elif model == 'resnet34_random':
            # Please load your own model. Example here:
            # resnet = torch.load(
            #     '/home/gh349/xqt/wide-resnet.pytorch/checkpoint/cifar10/random_resnet34.t7')
            pass
            print('Using resnet34 with random weights.')
            raise NotImplementedError()

        else:
            resnet = getattr(models, 'resnet34')(pretrained=True)
            print('Using resnet34 with pretrained weights.')

        resnet.cuda().eval()
        resnet_feature = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu,
                                       resnet.maxpool, resnet.layer1,
                                       resnet.layer2, resnet.layer3, resnet.layer4)
        input = Variable(torch.FloatTensor().cuda())

        trans = transforms.Compose([
            transforms.Scale(224),
            transforms.ToTensor(),
            transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
            # transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ])

        dataset = dset.ImageFolder(root=imgFolder, transform=trans)
        dataloader = torch.utils.data.DataLoader(
            dataset, batch_size=batch_size, num_workers=int(workers),
            shuffle=False)

        print('saving resnet features:')
        feature_conv, feature_smax, feature_class = [], [], []
        for img, _ in tqdm(dataloader):
            input = Variable(img.cuda(), volatile=True)
            fconv = resnet_feature(input)
            fconv = fconv.mean(3).mean(2).squeeze()
            flogit = resnet.fc(fconv)
            fsmax = F.softmax(flogit)
            feature_conv.append(fconv.data.cpu())
            feature_class.append(flogit.data.cpu())
            feature_smax.append(fsmax.data.cpu())
        feature_conv = torch.cat(feature_conv, 0)
        feature_class = torch.cat(feature_class, 0)
        feature_smax = torch.cat(feature_smax, 0)

        mkdir(g.default_feature_dir)
        feature_dir = g.default_feature_dir + \
            opt.data + "/" + lastFolder(imgFolder)
        mkdir(g.default_feature_dir + opt.data)

        torch.save(feature_conv, feature_dir + '_' + model + '_conv.pth')
        torch.save(feature_class, feature_dir + '_' + model + '_class.pth')
        torch.save(feature_smax, feature_dir + '_' + model + '_smax.pth')
        return feature_conv, feature_class, feature_smax

    elif model == 'inception' or model == 'inception_v3':
        inception = models.inception_v3(
            pretrained=True, transform_input=False).cuda().eval()
        inception_feature = nn.Sequential(inception.Conv2d_1a_3x3,
                                          inception.Conv2d_2a_3x3,
                                          inception.Conv2d_2b_3x3,
                                          nn.MaxPool2d(3, 2),
                                          inception.Conv2d_3b_1x1,
                                          inception.Conv2d_4a_3x3,
                                          nn.MaxPool2d(3, 2),
                                          inception.Mixed_5b,
                                          inception.Mixed_5c,
                                          inception.Mixed_5d,
                                          inception.Mixed_6a,
                                          inception.Mixed_6b,
                                          inception.Mixed_6c,
                                          inception.Mixed_6d,
                                          inception.Mixed_7a,
                                          inception.Mixed_7b,
                                          inception.Mixed_7c,
                                          ).cuda().eval()

        trans = transforms.Compose([
            transforms.Scale(224),
            transforms.ToTensor(),
            transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
        ])

        dataset = dset.ImageFolder(root=imgFolder, transform=trans)
        dataloader = torch.utils.data.DataLoader(
            dataset, batch_size=batch_size, num_workers=int(workers),
            shuffle=False)

        print('saving inception features:')
        feature_conv, feature_smax, feature_class = [], [], []
        for img, _ in tqdm(dataloader):
            input = Variable(img.cuda(), volatile=True)
            fconv = inception_feature(input)
            fconv = fconv.mean(3).mean(2).squeeze()
            flogit = inception.fc(fconv)
            fsmax = F.softmax(flogit)
            feature_conv.append(fconv.data.cpu())
            feature_class.append(flogit.data.cpu())
            feature_smax.append(fsmax.data.cpu())
        feature_conv = torch.cat(feature_conv, 0)
        feature_class = torch.cat(feature_class, 0)
        feature_smax = torch.cat(feature_smax, 0)

    else:
        raise NotImplementedError


    torch.save(feature_conv, '{}_{}_conv.pth'.format(feature_dir, model))
    torch.save(feature_class, '{}_{}_class.pth'.format(feature_dir, model))
    torch.save(feature_smax, '{}_{}_smax.pth'.format(feature_dir, model))
    return feature_conv, feature_class, feature_smax
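A hedged example of calling saveFeature; the image folder path and the minimal opt object below are made up, and a CUDA device plus the repository's mkdir and lastFolder helpers are assumed.

# Hypothetical call: caches resnet34 conv/class/softmax features for every image
# under the folder as <feature_dir>_resnet34_{conv,class,smax}.pth.
class _Opt:
    data = 'celeba'
    feat_model = None

features = saveFeature('/path/to/samples/celeba/true', _Opt(), model='resnet34',
                       workers=2, batch_size=32)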
Example #15
import threading
from threading import Thread
from globals import Globals
from device_info import DeviceInfo
import json
from heartbeat import HeartBeat
import time
from event import Event

if __name__ == '__main__':

    Globals()

    device = DeviceInfo(
        device_id='10', device_name='workstation 10', notes='I do not have any notes!')
    print(device.toJSON())

    while True:

        hb = HeartBeat('10', device)
        thread = hb.run_in_background()
        thread.join()
        print(hb.toJSON())
        time.sleep(10)